2024-12-01 22:48:42,613 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-01 22:48:42,623 main DEBUG Took 0.008397 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-01 22:48:42,623 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-01 22:48:42,623 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-01 22:48:42,624 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-01 22:48:42,625 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 22:48:42,638 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-01 22:48:42,649 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 22:48:42,650 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 22:48:42,650 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 22:48:42,650 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 22:48:42,651 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 22:48:42,651 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 22:48:42,652 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 22:48:42,652 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 22:48:42,652 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 22:48:42,653 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 22:48:42,653 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 22:48:42,653 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 22:48:42,654 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 22:48:42,654 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-01 22:48:42,654 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 22:48:42,655 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 22:48:42,655 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 22:48:42,655 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 22:48:42,656 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 22:48:42,656 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 22:48:42,656 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 22:48:42,656 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 22:48:42,657 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 22:48:42,657 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 22:48:42,657 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 22:48:42,658 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-01 22:48:42,659 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 22:48:42,660 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-01 22:48:42,661 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-01 22:48:42,662 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-01 22:48:42,663 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-01 22:48:42,663 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-01 22:48:42,671 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-01 22:48:42,673 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-01 22:48:42,674 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-01 22:48:42,675 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-01 22:48:42,675 main DEBUG createAppenders(={Console}) 2024-12-01 22:48:42,676 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-12-01 22:48:42,676 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-01 22:48:42,676 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-12-01 22:48:42,677 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-01 22:48:42,677 main DEBUG OutputStream closed 2024-12-01 22:48:42,677 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-01 22:48:42,677 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-01 22:48:42,678 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-12-01 22:48:42,736 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-01 22:48:42,738 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-01 22:48:42,739 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-01 22:48:42,740 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-01 22:48:42,740 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-01 22:48:42,741 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-01 22:48:42,741 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-01 22:48:42,741 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-01 22:48:42,742 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-01 22:48:42,742 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-01 22:48:42,742 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-01 22:48:42,742 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-01 22:48:42,742 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-01 22:48:42,743 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-01 22:48:42,743 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-01 22:48:42,743 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-01 22:48:42,743 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-01 22:48:42,744 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-01 22:48:42,746 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-01 22:48:42,747 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-12-01 22:48:42,747 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-01 22:48:42,747 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-12-01T22:48:42,762 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-12-01 22:48:42,765 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-01 22:48:42,765 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-01T22:48:43,010 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680 2024-12-01T22:48:43,032 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/cluster_c24864cc-12c7-6f47-9228-80b54c86a323, deleteOnExit=true 2024-12-01T22:48:43,033 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/test.cache.data in system properties and HBase conf 2024-12-01T22:48:43,033 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/hadoop.tmp.dir in system properties and HBase conf 2024-12-01T22:48:43,034 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/hadoop.log.dir in system properties and HBase conf 2024-12-01T22:48:43,035 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-01T22:48:43,035 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-01T22:48:43,036 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-01T22:48:43,126 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-01T22:48:43,206 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-01T22:48:43,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-01T22:48:43,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-01T22:48:43,211 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-01T22:48:43,211 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-01T22:48:43,211 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-01T22:48:43,212 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-01T22:48:43,212 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-01T22:48:43,213 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-01T22:48:43,213 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-01T22:48:43,214 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/nfs.dump.dir in system properties and HBase conf 2024-12-01T22:48:43,214 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/java.io.tmpdir in system properties and HBase conf 2024-12-01T22:48:43,214 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-01T22:48:43,215 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-01T22:48:43,215 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-01T22:48:44,235 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-01T22:48:44,300 INFO [Time-limited test {}] log.Log(170): Logging initialized @2258ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-01T22:48:44,361 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T22:48:44,418 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T22:48:44,436 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T22:48:44,436 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T22:48:44,437 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-01T22:48:44,449 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T22:48:44,451 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/hadoop.log.dir/,AVAILABLE} 2024-12-01T22:48:44,452 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T22:48:44,611 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e4c45c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/java.io.tmpdir/jetty-localhost-39175-hadoop-hdfs-3_4_1-tests_jar-_-any-5470999212919074320/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-01T22:48:44,617 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:39175} 2024-12-01T22:48:44,618 INFO [Time-limited test {}] server.Server(415): Started @2577ms 2024-12-01T22:48:45,135 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T22:48:45,141 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T22:48:45,142 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T22:48:45,142 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T22:48:45,143 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T22:48:45,143 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/hadoop.log.dir/,AVAILABLE} 2024-12-01T22:48:45,144 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T22:48:45,236 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4839957b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/java.io.tmpdir/jetty-localhost-40609-hadoop-hdfs-3_4_1-tests_jar-_-any-71141333431095694/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T22:48:45,237 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:40609} 2024-12-01T22:48:45,237 INFO [Time-limited test {}] server.Server(415): Started @3197ms 2024-12-01T22:48:45,281 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-01T22:48:45,378 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T22:48:45,385 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T22:48:45,386 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T22:48:45,386 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T22:48:45,386 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T22:48:45,387 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/hadoop.log.dir/,AVAILABLE} 2024-12-01T22:48:45,388 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T22:48:45,493 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c6b8f01{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/java.io.tmpdir/jetty-localhost-38955-hadoop-hdfs-3_4_1-tests_jar-_-any-15064275409811606972/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T22:48:45,493 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:38955} 2024-12-01T22:48:45,494 INFO [Time-limited test {}] server.Server(415): Started @3453ms 2024-12-01T22:48:45,496 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-01T22:48:45,533 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T22:48:45,537 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T22:48:45,539 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T22:48:45,539 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T22:48:45,539 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-01T22:48:45,540 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/hadoop.log.dir/,AVAILABLE} 2024-12-01T22:48:45,541 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T22:48:45,632 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e59159d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/java.io.tmpdir/jetty-localhost-40543-hadoop-hdfs-3_4_1-tests_jar-_-any-3900942209953974302/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T22:48:45,633 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:40543} 2024-12-01T22:48:45,633 INFO [Time-limited test {}] server.Server(415): Started @3592ms 2024-12-01T22:48:45,635 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-01T22:48:46,939 WARN [Thread-121 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/cluster_c24864cc-12c7-6f47-9228-80b54c86a323/data/data4/current/BP-1415834449-172.17.0.2-1733093323724/current, will proceed with Du for space computation calculation, 2024-12-01T22:48:46,939 WARN [Thread-120 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/cluster_c24864cc-12c7-6f47-9228-80b54c86a323/data/data3/current/BP-1415834449-172.17.0.2-1733093323724/current, will proceed with Du for space computation calculation, 2024-12-01T22:48:46,973 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-01T22:48:46,987 WARN [Thread-132 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/cluster_c24864cc-12c7-6f47-9228-80b54c86a323/data/data2/current/BP-1415834449-172.17.0.2-1733093323724/current, will proceed with Du for space computation calculation, 2024-12-01T22:48:46,987 WARN [Thread-131 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/cluster_c24864cc-12c7-6f47-9228-80b54c86a323/data/data1/current/BP-1415834449-172.17.0.2-1733093323724/current, will proceed with Du for space computation calculation, 2024-12-01T22:48:47,006 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-01T22:48:47,018 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8a1c1ae8e6222efd with lease ID 0x38f6e509843eb6b1: Processing first storage report for DS-a35ca45b-b965-4093-a799-b50ab4ca2aeb from datanode DatanodeRegistration(127.0.0.1:34251, datanodeUuid=ee475698-2a10-4d8e-b077-05c7eba9188c, infoPort=34957, infoSecurePort=0, ipcPort=33189, storageInfo=lv=-57;cid=testClusterID;nsid=1573146401;c=1733093323724) 2024-12-01T22:48:47,020 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8a1c1ae8e6222efd with lease ID 0x38f6e509843eb6b1: from storage DS-a35ca45b-b965-4093-a799-b50ab4ca2aeb node DatanodeRegistration(127.0.0.1:34251, datanodeUuid=ee475698-2a10-4d8e-b077-05c7eba9188c, infoPort=34957, infoSecurePort=0, ipcPort=33189, storageInfo=lv=-57;cid=testClusterID;nsid=1573146401;c=1733093323724), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-01T22:48:47,020 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1ff8ba7d70006019 with lease ID 0x38f6e509843eb6b0: Processing first storage report for DS-c7ebebbd-f83b-46e4-a67d-a453025140ef from datanode DatanodeRegistration(127.0.0.1:43459, datanodeUuid=bb0b79aa-5167-4918-8e04-5f537c070dc7, infoPort=39341, infoSecurePort=0, ipcPort=39489, storageInfo=lv=-57;cid=testClusterID;nsid=1573146401;c=1733093323724) 2024-12-01T22:48:47,021 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1ff8ba7d70006019 with lease ID 0x38f6e509843eb6b0: from storage DS-c7ebebbd-f83b-46e4-a67d-a453025140ef node DatanodeRegistration(127.0.0.1:43459, datanodeUuid=bb0b79aa-5167-4918-8e04-5f537c070dc7, infoPort=39341, infoSecurePort=0, ipcPort=39489, storageInfo=lv=-57;cid=testClusterID;nsid=1573146401;c=1733093323724), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T22:48:47,021 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8a1c1ae8e6222efd with lease ID 0x38f6e509843eb6b1: Processing first storage report for DS-677f651a-aac1-47aa-a2b1-77e1009cbc35 from datanode DatanodeRegistration(127.0.0.1:34251, datanodeUuid=ee475698-2a10-4d8e-b077-05c7eba9188c, infoPort=34957, infoSecurePort=0, ipcPort=33189, storageInfo=lv=-57;cid=testClusterID;nsid=1573146401;c=1733093323724) 2024-12-01T22:48:47,021 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x8a1c1ae8e6222efd with lease ID 0x38f6e509843eb6b1: from storage DS-677f651a-aac1-47aa-a2b1-77e1009cbc35 node DatanodeRegistration(127.0.0.1:34251, datanodeUuid=ee475698-2a10-4d8e-b077-05c7eba9188c, infoPort=34957, infoSecurePort=0, ipcPort=33189, storageInfo=lv=-57;cid=testClusterID;nsid=1573146401;c=1733093323724), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T22:48:47,022 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1ff8ba7d70006019 with lease ID 0x38f6e509843eb6b0: Processing first storage report for DS-2866d442-d404-429c-9d21-e54e980f22ab from datanode DatanodeRegistration(127.0.0.1:43459, datanodeUuid=bb0b79aa-5167-4918-8e04-5f537c070dc7, infoPort=39341, infoSecurePort=0, ipcPort=39489, storageInfo=lv=-57;cid=testClusterID;nsid=1573146401;c=1733093323724) 2024-12-01T22:48:47,022 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1ff8ba7d70006019 with lease ID 0x38f6e509843eb6b0: from storage DS-2866d442-d404-429c-9d21-e54e980f22ab node DatanodeRegistration(127.0.0.1:43459, datanodeUuid=bb0b79aa-5167-4918-8e04-5f537c070dc7, infoPort=39341, infoSecurePort=0, ipcPort=39489, storageInfo=lv=-57;cid=testClusterID;nsid=1573146401;c=1733093323724), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T22:48:47,162 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/cluster_c24864cc-12c7-6f47-9228-80b54c86a323/data/data5/current/BP-1415834449-172.17.0.2-1733093323724/current, will proceed with Du for space computation calculation, 2024-12-01T22:48:47,162 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/cluster_c24864cc-12c7-6f47-9228-80b54c86a323/data/data6/current/BP-1415834449-172.17.0.2-1733093323724/current, will proceed with Du for space computation calculation, 2024-12-01T22:48:47,179 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-01T22:48:47,183 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x818adeb9b836e798 with lease ID 0x38f6e509843eb6b2: Processing first storage report for DS-fcd3638e-7492-409f-a956-f5c896f3c2c8 from datanode DatanodeRegistration(127.0.0.1:37053, datanodeUuid=0ebe535e-2a4a-4998-9d26-8b618c802e37, infoPort=38871, infoSecurePort=0, ipcPort=34281, storageInfo=lv=-57;cid=testClusterID;nsid=1573146401;c=1733093323724) 2024-12-01T22:48:47,183 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x818adeb9b836e798 with lease ID 0x38f6e509843eb6b2: from storage DS-fcd3638e-7492-409f-a956-f5c896f3c2c8 node DatanodeRegistration(127.0.0.1:37053, datanodeUuid=0ebe535e-2a4a-4998-9d26-8b618c802e37, infoPort=38871, infoSecurePort=0, ipcPort=34281, storageInfo=lv=-57;cid=testClusterID;nsid=1573146401;c=1733093323724), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-01T22:48:47,183 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x818adeb9b836e798 with lease ID 0x38f6e509843eb6b2: Processing first storage report for DS-2b4b08ea-9c3f-4d3f-ac89-7734a5b5df40 from datanode DatanodeRegistration(127.0.0.1:37053, datanodeUuid=0ebe535e-2a4a-4998-9d26-8b618c802e37, infoPort=38871, infoSecurePort=0, ipcPort=34281, storageInfo=lv=-57;cid=testClusterID;nsid=1573146401;c=1733093323724) 2024-12-01T22:48:47,184 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x818adeb9b836e798 with lease ID 0x38f6e509843eb6b2: from storage DS-2b4b08ea-9c3f-4d3f-ac89-7734a5b5df40 node DatanodeRegistration(127.0.0.1:37053, datanodeUuid=0ebe535e-2a4a-4998-9d26-8b618c802e37, infoPort=38871, infoSecurePort=0, ipcPort=34281, storageInfo=lv=-57;cid=testClusterID;nsid=1573146401;c=1733093323724), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T22:48:47,223 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680 2024-12-01T22:48:47,288 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable 2024-12-01T22:48:47,335 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=157, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=334, ProcessCount=11, AvailableMemoryMB=9748 2024-12-01T22:48:47,338 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-01T22:48:47,348 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-12-01T22:48:47,423 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/cluster_c24864cc-12c7-6f47-9228-80b54c86a323/zookeeper_0, clientPort=55917, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/cluster_c24864cc-12c7-6f47-9228-80b54c86a323/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/cluster_c24864cc-12c7-6f47-9228-80b54c86a323/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-01T22:48:47,433 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55917 2024-12-01T22:48:47,448 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T22:48:47,451 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T22:48:47,538 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:47,538 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:47,574 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-140738658_22 at /127.0.0.1:59196 [Receiving block BP-1415834449-172.17.0.2-1733093323724:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:34251:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59196 dst: /127.0.0.1:34251 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T22:48:47,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34251 is added to blk_-9223372036854775792_1002 (size=7) 2024-12-01T22:48:47,995 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-01T22:48:48,007 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0 with version=8 2024-12-01T22:48:48,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/hbase-staging 2024-12-01T22:48:48,088 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-01T22:48:48,306 INFO [Time-limited test {}] client.ConnectionUtils(128): master/14efca635be3:0 server-side Connection retries=45 2024-12-01T22:48:48,314 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T22:48:48,315 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T22:48:48,319 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T22:48:48,320 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T22:48:48,320 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T22:48:48,444 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-01T22:48:48,491 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class 
org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-01T22:48:48,498 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-01T22:48:48,501 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T22:48:48,521 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 65848 (auto-detected) 2024-12-01T22:48:48,522 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-01T22:48:48,537 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38179 2024-12-01T22:48:48,554 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38179 connecting to ZooKeeper ensemble=127.0.0.1:55917 2024-12-01T22:48:48,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:381790x0, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T22:48:48,690 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38179-0x10193157dd90000 connected 2024-12-01T22:48:48,780 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T22:48:48,783 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T22:48:48,793 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T22:48:48,797 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0, hbase.cluster.distributed=false 2024-12-01T22:48:48,816 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T22:48:48,821 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38179 2024-12-01T22:48:48,821 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38179 2024-12-01T22:48:48,821 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38179 2024-12-01T22:48:48,822 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38179 2024-12-01T22:48:48,822 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38179 2024-12-01T22:48:48,906 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/14efca635be3:0 server-side Connection retries=45 2024-12-01T22:48:48,907 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T22:48:48,907 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T22:48:48,908 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T22:48:48,908 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T22:48:48,908 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T22:48:48,910 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T22:48:48,912 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T22:48:48,913 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42517 2024-12-01T22:48:48,914 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42517 connecting to ZooKeeper ensemble=127.0.0.1:55917 2024-12-01T22:48:48,915 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T22:48:48,919 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T22:48:48,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:425170x0, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T22:48:48,926 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:425170x0, quorum=127.0.0.1:55917, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T22:48:48,927 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42517-0x10193157dd90001 connected 2024-12-01T22:48:48,930 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-01T22:48:48,937 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-01T22:48:48,939 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42517-0x10193157dd90001, quorum=127.0.0.1:55917, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T22:48:48,944 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42517-0x10193157dd90001, quorum=127.0.0.1:55917, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T22:48:48,945 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42517 2024-12-01T22:48:48,945 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42517 
2024-12-01T22:48:48,945 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42517 2024-12-01T22:48:48,946 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42517 2024-12-01T22:48:48,947 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42517 2024-12-01T22:48:48,961 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/14efca635be3:0 server-side Connection retries=45 2024-12-01T22:48:48,961 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T22:48:48,961 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T22:48:48,962 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T22:48:48,962 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T22:48:48,962 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T22:48:48,962 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T22:48:48,962 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T22:48:48,963 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32909 2024-12-01T22:48:48,964 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:32909 connecting to ZooKeeper ensemble=127.0.0.1:55917 2024-12-01T22:48:48,965 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T22:48:48,968 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T22:48:48,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:329090x0, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T22:48:48,979 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:329090x0, quorum=127.0.0.1:55917, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T22:48:48,979 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:32909-0x10193157dd90002 connected 2024-12-01T22:48:48,980 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 
2024-12-01T22:48:48,982 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-01T22:48:48,983 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32909-0x10193157dd90002, quorum=127.0.0.1:55917, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T22:48:48,985 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32909-0x10193157dd90002, quorum=127.0.0.1:55917, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T22:48:48,987 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32909 2024-12-01T22:48:48,988 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32909 2024-12-01T22:48:48,988 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32909 2024-12-01T22:48:48,990 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32909 2024-12-01T22:48:48,992 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32909 2024-12-01T22:48:49,006 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/14efca635be3:0 server-side Connection retries=45 2024-12-01T22:48:49,006 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T22:48:49,006 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T22:48:49,007 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T22:48:49,007 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T22:48:49,007 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T22:48:49,007 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T22:48:49,007 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T22:48:49,009 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43553 2024-12-01T22:48:49,010 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43553 connecting to ZooKeeper ensemble=127.0.0.1:55917 2024-12-01T22:48:49,012 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T22:48:49,013 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T22:48:49,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:435530x0, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T22:48:49,021 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:435530x0, quorum=127.0.0.1:55917, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T22:48:49,021 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43553-0x10193157dd90003 connected 2024-12-01T22:48:49,022 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-01T22:48:49,023 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-01T22:48:49,023 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43553-0x10193157dd90003, quorum=127.0.0.1:55917, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T22:48:49,026 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43553-0x10193157dd90003, quorum=127.0.0.1:55917, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T22:48:49,028 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43553 2024-12-01T22:48:49,029 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43553 2024-12-01T22:48:49,029 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43553 2024-12-01T22:48:49,031 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43553 2024-12-01T22:48:49,032 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43553 2024-12-01T22:48:49,046 DEBUG [M:0;14efca635be3:38179 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;14efca635be3:38179 2024-12-01T22:48:49,046 INFO [master/14efca635be3:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/14efca635be3,38179,1733093328163 2024-12-01T22:48:49,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42517-0x10193157dd90001, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T22:48:49,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32909-0x10193157dd90002, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T22:48:49,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43553-0x10193157dd90003, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
2024-12-01T22:48:49,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T22:48:49,066 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/14efca635be3,38179,1733093328163 2024-12-01T22:48:49,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32909-0x10193157dd90002, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T22:48:49,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42517-0x10193157dd90001, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T22:48:49,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43553-0x10193157dd90003, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T22:48:49,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:49,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32909-0x10193157dd90002, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:49,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42517-0x10193157dd90001, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:49,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43553-0x10193157dd90003, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:49,096 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-01T22:48:49,098 INFO [master/14efca635be3:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/14efca635be3,38179,1733093328163 from backup master directory 2024-12-01T22:48:49,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42517-0x10193157dd90001, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T22:48:49,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/14efca635be3,38179,1733093328163 2024-12-01T22:48:49,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32909-0x10193157dd90002, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
2024-12-01T22:48:49,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43553-0x10193157dd90003, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T22:48:49,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T22:48:49,106 WARN [master/14efca635be3:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-01T22:48:49,106 INFO [master/14efca635be3:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=14efca635be3,38179,1733093328163 2024-12-01T22:48:49,109 INFO [master/14efca635be3:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-01T22:48:49,110 INFO [master/14efca635be3:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-01T22:48:49,171 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/hbase.id] with ID: a605e7bb-df38-4899-ada9-3f48a58472a4 2024-12-01T22:48:49,171 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/.tmp/hbase.id 2024-12-01T22:48:49,178 WARN [master/14efca635be3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:49,178 WARN [master/14efca635be3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:49,181 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-140738658_22 at /127.0.0.1:35998 [Receiving block BP-1415834449-172.17.0.2-1733093323724:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:43459:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35998 dst: /127.0.0.1:43459
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-01T22:48:49,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43459 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-01T22:48:49,188 WARN [master/14efca635be3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-01T22:48:49,189 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/.tmp/hbase.id]:[hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/hbase.id] 2024-12-01T22:48:49,233 INFO [master/14efca635be3:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T22:48:49,238 INFO [master/14efca635be3:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-01T22:48:49,255 INFO [master/14efca635be3:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 15ms. 2024-12-01T22:48:49,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32909-0x10193157dd90002, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:49,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42517-0x10193157dd90001, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:49,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43553-0x10193157dd90003, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:49,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:49,274 WARN [master/14efca635be3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:49,274 WARN [master/14efca635be3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:49,277 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-140738658_22 at /127.0.0.1:59214 [Receiving block BP-1415834449-172.17.0.2-1733093323724:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:34251:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59214 dst: /127.0.0.1:34251
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-01T22:48:49,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34251 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-01T22:48:49,284 WARN [master/14efca635be3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-01T22:48:49,296 INFO [master/14efca635be3:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T22:48:49,298 INFO [master/14efca635be3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-01T22:48:49,303 INFO [master/14efca635be3:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-01T22:48:49,327 WARN [master/14efca635be3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:49,327 WARN [master/14efca635be3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:49,330 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-140738658_22 at /127.0.0.1:59228 [Receiving block BP-1415834449-172.17.0.2-1733093323724:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:34251:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59228 dst: /127.0.0.1:34251
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-01T22:48:49,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34251 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-01T22:48:49,335 WARN [master/14efca635be3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-01T22:48:49,352 INFO [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/MasterData/data/master/store 2024-12-01T22:48:49,366 WARN [master/14efca635be3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:49,367 WARN [master/14efca635be3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:49,369 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-140738658_22 at /127.0.0.1:59236 [Receiving block BP-1415834449-172.17.0.2-1733093323724:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34251:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59236 dst: /127.0.0.1:34251
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-01T22:48:49,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34251 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-01T22:48:49,375 WARN [master/14efca635be3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-01T22:48:49,379 INFO [master/14efca635be3:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-01T22:48:49,382 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T22:48:49,383 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-01T22:48:49,383 INFO [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T22:48:49,383 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T22:48:49,384 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
after waiting 0 ms 2024-12-01T22:48:49,384 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T22:48:49,385 INFO [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T22:48:49,386 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733093329383Disabling compacts and flushes for region at 1733093329383Disabling writes for close at 1733093329384 (+1 ms)Writing region close event to WAL at 1733093329384Closed at 1733093329385 (+1 ms) 2024-12-01T22:48:49,388 WARN [master/14efca635be3:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/MasterData/data/master/store/.initializing 2024-12-01T22:48:49,388 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/MasterData/WALs/14efca635be3,38179,1733093328163 2024-12-01T22:48:49,396 INFO [master/14efca635be3:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-01T22:48:49,409 INFO [master/14efca635be3:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=14efca635be3%2C38179%2C1733093328163, suffix=, logDir=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/MasterData/WALs/14efca635be3,38179,1733093328163, archiveDir=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/MasterData/oldWALs, maxLogs=10 2024-12-01T22:48:49,446 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/MasterData/WALs/14efca635be3,38179,1733093328163/14efca635be3%2C38179%2C1733093328163.1733093329415, exclude list is [], retry=0 2024-12-01T22:48:49,463 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396
java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
    at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-01T22:48:49,464 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43459,DS-c7ebebbd-f83b-46e4-a67d-a453025140ef,DISK] 2024-12-01T22:48:49,464 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34251,DS-a35ca45b-b965-4093-a799-b50ab4ca2aeb,DISK] 2024-12-01T22:48:49,464 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37053,DS-fcd3638e-7492-409f-a956-f5c896f3c2c8,DISK] 2024-12-01T22:48:49,467 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-01T22:48:49,505 INFO [master/14efca635be3:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/MasterData/WALs/14efca635be3,38179,1733093328163/14efca635be3%2C38179%2C1733093328163.1733093329415 2024-12-01T22:48:49,506 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39341:39341),(127.0.0.1/127.0.0.1:38871:38871),(127.0.0.1/127.0.0.1:34957:34957)] 2024-12-01T22:48:49,506 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-01T22:48:49,507 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T22:48:49,510 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T22:48:49,510 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T22:48:49,546 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T22:48:49,569 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-01T22:48:49,572 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T22:48:49,574 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T22:48:49,575 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T22:48:49,578 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-01T22:48:49,579 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T22:48:49,579 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T22:48:49,580 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T22:48:49,582 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-01T22:48:49,582 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T22:48:49,583 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T22:48:49,584 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T22:48:49,586 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-01T22:48:49,586 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T22:48:49,587 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T22:48:49,587 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T22:48:49,591 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-01T22:48:49,592 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-01T22:48:49,596 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T22:48:49,597 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T22:48:49,601 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-01T22:48:49,604 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T22:48:49,610 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T22:48:49,611 INFO [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60325061, jitterRate=-0.10108654201030731}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T22:48:49,618 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733093329522Initializing all the Stores at 1733093329524 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733093329525 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733093329525Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733093329525Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733093329525Cleaning up temporary data from old regions at 1733093329597 (+72 ms)Region opened successfully at 1733093329618 (+21 ms) 2024-12-01T22:48:49,619 INFO [master/14efca635be3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-01T22:48:49,649 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26f2c297, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=14efca635be3/172.17.0.2:0 2024-12-01T22:48:49,674 INFO [master/14efca635be3:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-01T22:48:49,684 INFO [master/14efca635be3:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-01T22:48:49,684 INFO [master/14efca635be3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-01T22:48:49,686 INFO [master/14efca635be3:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-01T22:48:49,687 INFO [master/14efca635be3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-01T22:48:49,692 INFO [master/14efca635be3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-01T22:48:49,692 INFO [master/14efca635be3:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-01T22:48:49,715 INFO [master/14efca635be3:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-01T22:48:49,722 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-01T22:48:49,768 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-01T22:48:49,770 INFO [master/14efca635be3:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-01T22:48:49,772 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-01T22:48:49,778 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-01T22:48:49,780 INFO [master/14efca635be3:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-01T22:48:49,784 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-01T22:48:49,789 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-01T22:48:49,790 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-01T22:48:49,799 DEBUG [master/14efca635be3:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-01T22:48:49,815 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-01T22:48:49,820 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-01T22:48:49,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42517-0x10193157dd90001, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T22:48:49,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32909-0x10193157dd90002, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T22:48:49,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43553-0x10193157dd90003, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T22:48:49,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T22:48:49,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42517-0x10193157dd90001, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:49,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43553-0x10193157dd90003, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:49,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:49,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32909-0x10193157dd90002, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:49,833 INFO [master/14efca635be3:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=14efca635be3,38179,1733093328163, sessionid=0x10193157dd90000, setting cluster-up flag (Was=false) 2024-12-01T22:48:49,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42517-0x10193157dd90001, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:49,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43553-0x10193157dd90003, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:49,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-12-01T22:48:49,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32909-0x10193157dd90002, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:49,894 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-01T22:48:49,899 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=14efca635be3,38179,1733093328163 2024-12-01T22:48:49,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42517-0x10193157dd90001, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:49,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32909-0x10193157dd90002, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:49,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:49,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43553-0x10193157dd90003, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:49,957 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-01T22:48:49,962 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=14efca635be3,38179,1733093328163 2024-12-01T22:48:49,971 INFO [master/14efca635be3:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-01T22:48:50,035 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-01T22:48:50,036 INFO [RS:2;14efca635be3:43553 {}] regionserver.HRegionServer(746): ClusterId : a605e7bb-df38-4899-ada9-3f48a58472a4 2024-12-01T22:48:50,037 INFO [RS:1;14efca635be3:32909 {}] regionserver.HRegionServer(746): ClusterId : a605e7bb-df38-4899-ada9-3f48a58472a4 2024-12-01T22:48:50,037 INFO [RS:0;14efca635be3:42517 {}] regionserver.HRegionServer(746): ClusterId : a605e7bb-df38-4899-ada9-3f48a58472a4 2024-12-01T22:48:50,039 DEBUG [RS:1;14efca635be3:32909 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-01T22:48:50,039 DEBUG [RS:2;14efca635be3:43553 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-01T22:48:50,039 DEBUG [RS:0;14efca635be3:42517 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-01T22:48:50,044 INFO 
[master/14efca635be3:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-01T22:48:50,050 INFO [master/14efca635be3:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-01T22:48:50,055 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 14efca635be3,38179,1733093328163 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-01T22:48:50,064 DEBUG [RS:1;14efca635be3:32909 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-01T22:48:50,064 DEBUG [RS:2;14efca635be3:43553 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-01T22:48:50,064 DEBUG [RS:0;14efca635be3:42517 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-01T22:48:50,065 DEBUG [RS:1;14efca635be3:32909 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-01T22:48:50,065 DEBUG [RS:2;14efca635be3:43553 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-01T22:48:50,065 DEBUG [RS:0;14efca635be3:42517 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-01T22:48:50,066 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/14efca635be3:0, corePoolSize=5, maxPoolSize=5 2024-12-01T22:48:50,066 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/14efca635be3:0, corePoolSize=5, maxPoolSize=5 2024-12-01T22:48:50,066 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/14efca635be3:0, corePoolSize=5, maxPoolSize=5 2024-12-01T22:48:50,066 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/14efca635be3:0, corePoolSize=5, maxPoolSize=5 2024-12-01T22:48:50,066 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/14efca635be3:0, corePoolSize=10, maxPoolSize=10 2024-12-01T22:48:50,067 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,067 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/14efca635be3:0, corePoolSize=2, maxPoolSize=2 
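The StochasticLoadBalancer record above echoes its tuning knobs (maxSteps=1000000, stepsPerRegion=800, maxRunningTime=30000, runMaxSteps=false) and notes that the sum of cost-function multipliers is 0.0. Purely as a hedged sketch: such values are normally supplied through hbase-site.xml / Configuration properties; the hbase.master.balancer.stochastic.* key names below are assumed from the usual balancer settings, while the values mirror the figures in the record above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed property keys; values mirror the log record above.
            conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
            conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
            conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
            conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
            System.out.println("maxSteps = "
                + conf.getInt("hbase.master.balancer.stochastic.maxSteps", -1));
        }
    }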
2024-12-01T22:48:50,067 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,072 INFO [master/14efca635be3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733093360072 2024-12-01T22:48:50,072 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-01T22:48:50,073 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-01T22:48:50,074 INFO [master/14efca635be3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-01T22:48:50,075 INFO [master/14efca635be3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-01T22:48:50,079 INFO [master/14efca635be3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-01T22:48:50,079 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T22:48:50,079 INFO [master/14efca635be3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-01T22:48:50,079 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-01T22:48:50,079 INFO [master/14efca635be3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-01T22:48:50,080 INFO 
[master/14efca635be3:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-01T22:48:50,081 INFO [master/14efca635be3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,085 DEBUG [RS:1;14efca635be3:32909 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T22:48:50,085 DEBUG [RS:2;14efca635be3:43553 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T22:48:50,085 INFO [master/14efca635be3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-01T22:48:50,085 DEBUG [RS:1;14efca635be3:32909 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58aadc5e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=14efca635be3/172.17.0.2:0 2024-12-01T22:48:50,085 DEBUG [RS:2;14efca635be3:43553 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69f969ca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=14efca635be3/172.17.0.2:0 2024-12-01T22:48:50,086 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:50,087 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
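The DFSStripedOutputStream warnings above mean the single-host mini-cluster cannot place all RS-3-2-1024k parity blocks, and the message itself points at 'hdfs ec -verifyClusterSetup'. As an assumed, illustrative sketch (not part of the log), the same erasure-coding information is reachable from Java via DistributedFileSystem; the NameNode address and path are taken from the surrounding records.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class EcPolicyCheck {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // NameNode address as it appears in this log; assumed reachable.
            conf.set("fs.defaultFS", "hdfs://localhost:40035");
            try (FileSystem fs = FileSystem.get(conf)) {
                DistributedFileSystem dfs = (DistributedFileSystem) fs;
                Path dir = new Path("/user/jenkins/test-data");
                // EC policy (if any) applying to the test data directory.
                System.out.println("EC policy: " + dfs.getErasureCodingPolicy(dir));
                // Policies known to the cluster, e.g. RS-3-2-1024k.
                System.out.println("all policies: " + dfs.getAllErasureCodingPolicies());
            }
        }
    }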
2024-12-01T22:48:50,087 DEBUG [RS:0;14efca635be3:42517 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T22:48:50,087 INFO [master/14efca635be3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-01T22:48:50,087 INFO [master/14efca635be3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-01T22:48:50,087 DEBUG [RS:0;14efca635be3:42517 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e493172, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=14efca635be3/172.17.0.2:0 2024-12-01T22:48:50,100 INFO [master/14efca635be3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-01T22:48:50,101 INFO [master/14efca635be3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-01T22:48:50,104 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-140738658_22 at /127.0.0.1:36024 [Receiving block BP-1415834449-172.17.0.2-1733093323724:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:43459:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36024 dst: /127.0.0.1:43459 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-01T22:48:50,106 DEBUG [RS:1;14efca635be3:32909 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;14efca635be3:32909 2024-12-01T22:48:50,107 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/14efca635be3:0:becomeActiveMaster-HFileCleaner.large.0-1733093330102,5,FailOnTimeoutGroup] 2024-12-01T22:48:50,108 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/14efca635be3:0:becomeActiveMaster-HFileCleaner.small.0-1733093330108,5,FailOnTimeoutGroup] 2024-12-01T22:48:50,109 INFO [master/14efca635be3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,109 INFO [master/14efca635be3:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-01T22:48:50,110 INFO [master/14efca635be3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,110 INFO [RS:1;14efca635be3:32909 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-01T22:48:50,111 INFO [RS:1;14efca635be3:32909 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-01T22:48:50,111 INFO [master/14efca635be3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,111 DEBUG [RS:1;14efca635be3:32909 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-01T22:48:50,112 DEBUG [RS:2;14efca635be3:43553 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;14efca635be3:43553 2024-12-01T22:48:50,112 INFO [RS:2;14efca635be3:43553 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-01T22:48:50,112 INFO [RS:2;14efca635be3:43553 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-01T22:48:50,112 DEBUG [RS:2;14efca635be3:43553 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-01T22:48:50,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43459 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-01T22:48:50,114 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
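The HMaster record above notes that reopening regions with a very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is given a threshold > 0. A minimal sketch of enabling it on the site configuration, assuming nothing beyond the property name quoted in the log (the value 3 is an arbitrary example):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class EnableStoreFileRefCountRecovery {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Property named in the master log record above; > 0 enables the feature.
            conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
            System.out.println(
                conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
        }
    }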
2024-12-01T22:48:50,114 INFO [RS:2;14efca635be3:43553 {}] regionserver.HRegionServer(2659): reportForDuty to master=14efca635be3,38179,1733093328163 with port=43553, startcode=1733093329005 2024-12-01T22:48:50,114 INFO [RS:1;14efca635be3:32909 {}] regionserver.HRegionServer(2659): reportForDuty to master=14efca635be3,38179,1733093328163 with port=32909, startcode=1733093328960 2024-12-01T22:48:50,114 DEBUG [RS:0;14efca635be3:42517 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;14efca635be3:42517 2024-12-01T22:48:50,114 INFO [RS:0;14efca635be3:42517 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-01T22:48:50,114 INFO [RS:0;14efca635be3:42517 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-01T22:48:50,114 DEBUG [RS:0;14efca635be3:42517 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-01T22:48:50,115 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-01T22:48:50,116 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0 2024-12-01T22:48:50,116 INFO [RS:0;14efca635be3:42517 {}] regionserver.HRegionServer(2659): reportForDuty to master=14efca635be3,38179,1733093328163 with port=42517, startcode=1733093328877 2024-12-01T22:48:50,126 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
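The InitMetaProcedure records above print the full hbase:meta descriptor, including per-family attributes such as DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true' and an 8 KB block size for 'info'. As an illustrative sketch of how an equivalent family would be expressed through the public descriptor builders (attribute values taken from the log; the table name 'exampletable' is hypothetical, since hbase:meta itself is system-managed):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
        public static void main(String[] args) {
            // Family attributes mirror the 'info' family printed in the log above.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8192)
                .build();

            // Hypothetical table name; shown only to complete the descriptor.
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("exampletable"))
                .setColumnFamily(info)
                .build();
            System.out.println(td);
        }
    }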
2024-12-01T22:48:50,127 DEBUG [RS:2;14efca635be3:43553 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T22:48:50,127 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:50,127 DEBUG [RS:1;14efca635be3:32909 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T22:48:50,127 DEBUG [RS:0;14efca635be3:42517 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T22:48:50,133 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-140738658_22 at /127.0.0.1:42534 [Receiving block BP-1415834449-172.17.0.2-1733093323724:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:37053:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42534 dst: /127.0.0.1:37053 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T22:48:50,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37053 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-01T22:48:50,143 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-01T22:48:50,144 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T22:48:50,147 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-01T22:48:50,152 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-01T22:48:50,152 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T22:48:50,153 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T22:48:50,154 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-01T22:48:50,157 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-01T22:48:50,158 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T22:48:50,159 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T22:48:50,159 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-01T22:48:50,162 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59579, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T22:48:50,162 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43537, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T22:48:50,162 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49739, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T22:48:50,162 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-01T22:48:50,162 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T22:48:50,163 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T22:48:50,164 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-01T22:48:50,167 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-01T22:48:50,167 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T22:48:50,168 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38179 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 
14efca635be3,42517,1733093328877 2024-12-01T22:48:50,168 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T22:48:50,168 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-01T22:48:50,170 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/hbase/meta/1588230740 2024-12-01T22:48:50,170 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38179 {}] master.ServerManager(517): Registering regionserver=14efca635be3,42517,1733093328877 2024-12-01T22:48:50,170 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/hbase/meta/1588230740 2024-12-01T22:48:50,174 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-01T22:48:50,174 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-01T22:48:50,175 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-01T22:48:50,178 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-01T22:48:50,182 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38179 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 14efca635be3,43553,1733093329005 2024-12-01T22:48:50,182 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38179 {}] master.ServerManager(517): Registering regionserver=14efca635be3,43553,1733093329005 2024-12-01T22:48:50,186 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38179 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 14efca635be3,32909,1733093328960 2024-12-01T22:48:50,186 DEBUG [RS:2;14efca635be3:43553 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0 2024-12-01T22:48:50,186 DEBUG [RS:0;14efca635be3:42517 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0 2024-12-01T22:48:50,187 DEBUG [RS:2;14efca635be3:43553 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40035 2024-12-01T22:48:50,187 DEBUG [RS:0;14efca635be3:42517 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40035 2024-12-01T22:48:50,187 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38179 {}] master.ServerManager(517): Registering regionserver=14efca635be3,32909,1733093328960 2024-12-01T22:48:50,187 DEBUG [RS:2;14efca635be3:43553 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-01T22:48:50,187 DEBUG [RS:0;14efca635be3:42517 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-01T22:48:50,191 DEBUG [RS:1;14efca635be3:32909 {}] regionserver.HRegionServer(1440): Config from master: 
hbase.rootdir=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0 2024-12-01T22:48:50,191 DEBUG [RS:1;14efca635be3:32909 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40035 2024-12-01T22:48:50,191 DEBUG [RS:1;14efca635be3:32909 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-01T22:48:50,201 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T22:48:50,202 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62190592, jitterRate=-0.0732879638671875}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T22:48:50,204 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733093330144Initializing all the Stores at 1733093330146 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733093330146Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733093330147 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733093330147Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733093330147Cleaning up temporary data from old regions at 1733093330174 (+27 ms)Region opened successfully at 1733093330204 (+30 ms) 2024-12-01T22:48:50,204 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-01T22:48:50,204 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-01T22:48:50,204 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-01T22:48:50,204 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T22:48:50,205 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-01T22:48:50,206 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed 
hbase:meta,,1.1588230740 2024-12-01T22:48:50,206 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733093330204Disabling compacts and flushes for region at 1733093330204Disabling writes for close at 1733093330205 (+1 ms)Writing region close event to WAL at 1733093330205Closed at 1733093330206 (+1 ms) 2024-12-01T22:48:50,209 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-01T22:48:50,209 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-01T22:48:50,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43459 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-01T22:48:50,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37053 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-01T22:48:50,216 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-01T22:48:50,224 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-01T22:48:50,228 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-01T22:48:50,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T22:48:50,241 DEBUG [RS:2;14efca635be3:43553 {}] zookeeper.ZKUtil(111): regionserver:43553-0x10193157dd90003, quorum=127.0.0.1:55917, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/14efca635be3,43553,1733093329005 2024-12-01T22:48:50,242 DEBUG [RS:0;14efca635be3:42517 {}] zookeeper.ZKUtil(111): regionserver:42517-0x10193157dd90001, quorum=127.0.0.1:55917, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/14efca635be3,42517,1733093328877 2024-12-01T22:48:50,242 WARN [RS:0;14efca635be3:42517 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-01T22:48:50,242 WARN [RS:2;14efca635be3:43553 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-01T22:48:50,242 INFO [RS:0;14efca635be3:42517 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-01T22:48:50,242 INFO [RS:2;14efca635be3:43553 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-01T22:48:50,242 DEBUG [RS:1;14efca635be3:32909 {}] zookeeper.ZKUtil(111): regionserver:32909-0x10193157dd90002, quorum=127.0.0.1:55917, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/14efca635be3,32909,1733093328960 2024-12-01T22:48:50,242 WARN [RS:1;14efca635be3:32909 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-01T22:48:50,242 DEBUG [RS:0;14efca635be3:42517 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/WALs/14efca635be3,42517,1733093328877 2024-12-01T22:48:50,242 DEBUG [RS:2;14efca635be3:43553 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/WALs/14efca635be3,43553,1733093329005 2024-12-01T22:48:50,242 INFO [RS:1;14efca635be3:32909 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-01T22:48:50,242 DEBUG [RS:1;14efca635be3:32909 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/WALs/14efca635be3,32909,1733093328960 2024-12-01T22:48:50,244 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [14efca635be3,42517,1733093328877] 2024-12-01T22:48:50,244 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [14efca635be3,32909,1733093328960] 2024-12-01T22:48:50,244 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [14efca635be3,43553,1733093329005] 2024-12-01T22:48:50,267 INFO [RS:2;14efca635be3:43553 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T22:48:50,267 INFO [RS:0;14efca635be3:42517 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T22:48:50,267 INFO [RS:1;14efca635be3:32909 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T22:48:50,282 INFO [RS:0;14efca635be3:42517 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T22:48:50,282 INFO [RS:1;14efca635be3:32909 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T22:48:50,282 INFO [RS:2;14efca635be3:43553 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T22:48:50,288 INFO [RS:0;14efca635be3:42517 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T22:48:50,288 INFO [RS:1;14efca635be3:32909 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 
MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T22:48:50,288 INFO [RS:2;14efca635be3:43553 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T22:48:50,288 INFO [RS:0;14efca635be3:42517 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,288 INFO [RS:2;14efca635be3:43553 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,288 INFO [RS:1;14efca635be3:32909 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,291 INFO [RS:1;14efca635be3:32909 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-01T22:48:50,291 INFO [RS:2;14efca635be3:43553 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-01T22:48:50,291 INFO [RS:0;14efca635be3:42517 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-01T22:48:50,297 INFO [RS:0;14efca635be3:42517 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-01T22:48:50,297 INFO [RS:1;14efca635be3:32909 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-01T22:48:50,297 INFO [RS:2;14efca635be3:43553 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-01T22:48:50,298 INFO [RS:1;14efca635be3:32909 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,298 INFO [RS:0;14efca635be3:42517 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,298 INFO [RS:2;14efca635be3:43553 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
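The PressureAwareCompactionThroughputController records above report a 50–100 MB/s compaction throughput band with a 60 s tuning period on every region server. As a hedged sketch only: these bounds usually map onto the hbase.hstore.compaction.throughput.* properties; the key names below are assumed from the standard throughput-controller settings, while the byte values correspond to the 100 MB / 50 MB figures in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed property keys; values echo the bounds printed in the log.
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            conf.setInt("hbase.hstore.compaction.throughput.tune.period", 60_000);
            System.out.println(conf.get("hbase.hstore.compaction.throughput.higher.bound"));
        }
    }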
2024-12-01T22:48:50,299 DEBUG [RS:1;14efca635be3:32909 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,299 DEBUG [RS:0;14efca635be3:42517 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,299 DEBUG [RS:2;14efca635be3:43553 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,299 DEBUG [RS:1;14efca635be3:32909 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,299 DEBUG [RS:0;14efca635be3:42517 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,299 DEBUG [RS:2;14efca635be3:43553 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,299 DEBUG [RS:1;14efca635be3:32909 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,299 DEBUG [RS:1;14efca635be3:32909 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,299 DEBUG [RS:0;14efca635be3:42517 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,299 DEBUG [RS:2;14efca635be3:43553 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,299 DEBUG [RS:1;14efca635be3:32909 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,299 DEBUG [RS:0;14efca635be3:42517 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,299 DEBUG [RS:2;14efca635be3:43553 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,299 DEBUG [RS:1;14efca635be3:32909 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/14efca635be3:0, corePoolSize=2, maxPoolSize=2 2024-12-01T22:48:50,299 DEBUG [RS:0;14efca635be3:42517 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,299 DEBUG [RS:1;14efca635be3:32909 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,299 DEBUG [RS:2;14efca635be3:43553 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,300 DEBUG [RS:2;14efca635be3:43553 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/14efca635be3:0, corePoolSize=2, maxPoolSize=2 
2024-12-01T22:48:50,300 DEBUG [RS:1;14efca635be3:32909 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,300 DEBUG [RS:0;14efca635be3:42517 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/14efca635be3:0, corePoolSize=2, maxPoolSize=2 2024-12-01T22:48:50,300 DEBUG [RS:1;14efca635be3:32909 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,300 DEBUG [RS:0;14efca635be3:42517 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,300 DEBUG [RS:2;14efca635be3:43553 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,300 DEBUG [RS:1;14efca635be3:32909 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,300 DEBUG [RS:2;14efca635be3:43553 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,300 DEBUG [RS:0;14efca635be3:42517 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,300 DEBUG [RS:1;14efca635be3:32909 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,300 DEBUG [RS:1;14efca635be3:32909 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,300 DEBUG [RS:0;14efca635be3:42517 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,300 DEBUG [RS:2;14efca635be3:43553 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,300 DEBUG [RS:1;14efca635be3:32909 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/14efca635be3:0, corePoolSize=3, maxPoolSize=3 2024-12-01T22:48:50,300 DEBUG [RS:0;14efca635be3:42517 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,300 DEBUG [RS:2;14efca635be3:43553 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,300 DEBUG [RS:1;14efca635be3:32909 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0, corePoolSize=3, maxPoolSize=3 2024-12-01T22:48:50,300 DEBUG [RS:2;14efca635be3:43553 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,300 DEBUG [RS:0;14efca635be3:42517 {}] executor.ExecutorService(95): 
Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,300 DEBUG [RS:2;14efca635be3:43553 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,300 DEBUG [RS:0;14efca635be3:42517 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:50,300 DEBUG [RS:0;14efca635be3:42517 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/14efca635be3:0, corePoolSize=3, maxPoolSize=3 2024-12-01T22:48:50,300 DEBUG [RS:2;14efca635be3:43553 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/14efca635be3:0, corePoolSize=3, maxPoolSize=3 2024-12-01T22:48:50,301 DEBUG [RS:0;14efca635be3:42517 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0, corePoolSize=3, maxPoolSize=3 2024-12-01T22:48:50,301 DEBUG [RS:2;14efca635be3:43553 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0, corePoolSize=3, maxPoolSize=3 2024-12-01T22:48:50,304 INFO [RS:1;14efca635be3:32909 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,304 INFO [RS:1;14efca635be3:32909 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,304 INFO [RS:1;14efca635be3:32909 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,304 INFO [RS:1;14efca635be3:32909 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,304 INFO [RS:1;14efca635be3:32909 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,304 INFO [RS:2;14efca635be3:43553 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,304 INFO [RS:0;14efca635be3:42517 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,304 INFO [RS:1;14efca635be3:32909 {}] hbase.ChoreService(168): Chore ScheduledChore name=14efca635be3,32909,1733093328960-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T22:48:50,304 INFO [RS:2;14efca635be3:43553 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,304 INFO [RS:2;14efca635be3:43553 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,304 INFO [RS:0;14efca635be3:42517 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,304 INFO [RS:2;14efca635be3:43553 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-01T22:48:50,304 INFO [RS:0;14efca635be3:42517 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,304 INFO [RS:2;14efca635be3:43553 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,304 INFO [RS:0;14efca635be3:42517 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,304 INFO [RS:2;14efca635be3:43553 {}] hbase.ChoreService(168): Chore ScheduledChore name=14efca635be3,43553,1733093329005-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T22:48:50,305 INFO [RS:0;14efca635be3:42517 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,305 INFO [RS:0;14efca635be3:42517 {}] hbase.ChoreService(168): Chore ScheduledChore name=14efca635be3,42517,1733093328877-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T22:48:50,323 INFO [RS:1;14efca635be3:32909 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T22:48:50,325 INFO [RS:1;14efca635be3:32909 {}] hbase.ChoreService(168): Chore ScheduledChore name=14efca635be3,32909,1733093328960-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,325 INFO [RS:1;14efca635be3:32909 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,325 INFO [RS:1;14efca635be3:32909 {}] regionserver.Replication(171): 14efca635be3,32909,1733093328960 started 2024-12-01T22:48:50,330 INFO [RS:2;14efca635be3:43553 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T22:48:50,330 INFO [RS:0;14efca635be3:42517 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T22:48:50,331 INFO [RS:2;14efca635be3:43553 {}] hbase.ChoreService(168): Chore ScheduledChore name=14efca635be3,43553,1733093329005-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,331 INFO [RS:0;14efca635be3:42517 {}] hbase.ChoreService(168): Chore ScheduledChore name=14efca635be3,42517,1733093328877-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,331 INFO [RS:2;14efca635be3:43553 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,331 INFO [RS:0;14efca635be3:42517 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,331 INFO [RS:2;14efca635be3:43553 {}] regionserver.Replication(171): 14efca635be3,43553,1733093329005 started 2024-12-01T22:48:50,331 INFO [RS:0;14efca635be3:42517 {}] regionserver.Replication(171): 14efca635be3,42517,1733093328877 started 2024-12-01T22:48:50,349 INFO [RS:1;14efca635be3:32909 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
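At this point the three region servers (ports 42517, 32909 and 43553) have started their chores and replication services and, in the records that follow, report themselves as serving under the master at 14efca635be3,38179,1733093328163. As an assumed, illustrative sketch (not part of the log), a client could confirm those registrations through the Admin API; the ZooKeeper quorum settings are taken from earlier records in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class LiveServersCheck {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Quorum as reported earlier in this log (assumed reachable).
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");
            conf.set("hbase.zookeeper.property.clientPort", "55917");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Expect the three region servers registered in the records above.
                admin.getClusterMetrics().getLiveServerMetrics()
                     .keySet()
                     .forEach(sn -> System.out.println("live: " + sn));
            }
        }
    }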
2024-12-01T22:48:50,350 INFO [RS:1;14efca635be3:32909 {}] regionserver.HRegionServer(1482): Serving as 14efca635be3,32909,1733093328960, RpcServer on 14efca635be3/172.17.0.2:32909, sessionid=0x10193157dd90002 2024-12-01T22:48:50,351 DEBUG [RS:1;14efca635be3:32909 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T22:48:50,351 DEBUG [RS:1;14efca635be3:32909 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 14efca635be3,32909,1733093328960 2024-12-01T22:48:50,351 DEBUG [RS:1;14efca635be3:32909 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '14efca635be3,32909,1733093328960' 2024-12-01T22:48:50,351 DEBUG [RS:1;14efca635be3:32909 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T22:48:50,353 DEBUG [RS:1;14efca635be3:32909 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-01T22:48:50,353 INFO [RS:2;14efca635be3:43553 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,353 INFO [RS:0;14efca635be3:42517 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:50,353 DEBUG [RS:1;14efca635be3:32909 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T22:48:50,353 DEBUG [RS:1;14efca635be3:32909 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T22:48:50,354 INFO [RS:0;14efca635be3:42517 {}] regionserver.HRegionServer(1482): Serving as 14efca635be3,42517,1733093328877, RpcServer on 14efca635be3/172.17.0.2:42517, sessionid=0x10193157dd90001 2024-12-01T22:48:50,354 INFO [RS:2;14efca635be3:43553 {}] regionserver.HRegionServer(1482): Serving as 14efca635be3,43553,1733093329005, RpcServer on 14efca635be3/172.17.0.2:43553, sessionid=0x10193157dd90003 2024-12-01T22:48:50,354 DEBUG [RS:1;14efca635be3:32909 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 14efca635be3,32909,1733093328960 2024-12-01T22:48:50,354 DEBUG [RS:1;14efca635be3:32909 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '14efca635be3,32909,1733093328960' 2024-12-01T22:48:50,354 DEBUG [RS:1;14efca635be3:32909 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T22:48:50,354 DEBUG [RS:2;14efca635be3:43553 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T22:48:50,354 DEBUG [RS:0;14efca635be3:42517 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T22:48:50,354 DEBUG [RS:2;14efca635be3:43553 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 14efca635be3,43553,1733093329005 2024-12-01T22:48:50,354 DEBUG [RS:0;14efca635be3:42517 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 14efca635be3,42517,1733093328877 2024-12-01T22:48:50,354 DEBUG [RS:0;14efca635be3:42517 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '14efca635be3,42517,1733093328877' 2024-12-01T22:48:50,354 DEBUG [RS:2;14efca635be3:43553 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 
'14efca635be3,43553,1733093329005' 2024-12-01T22:48:50,354 DEBUG [RS:0;14efca635be3:42517 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T22:48:50,354 DEBUG [RS:2;14efca635be3:43553 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T22:48:50,354 DEBUG [RS:1;14efca635be3:32909 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-01T22:48:50,355 DEBUG [RS:0;14efca635be3:42517 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-01T22:48:50,355 DEBUG [RS:2;14efca635be3:43553 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-01T22:48:50,355 DEBUG [RS:1;14efca635be3:32909 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T22:48:50,355 INFO [RS:1;14efca635be3:32909 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-01T22:48:50,355 DEBUG [RS:2;14efca635be3:43553 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T22:48:50,355 DEBUG [RS:2;14efca635be3:43553 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T22:48:50,355 DEBUG [RS:0;14efca635be3:42517 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T22:48:50,355 INFO [RS:1;14efca635be3:32909 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-01T22:48:50,355 DEBUG [RS:2;14efca635be3:43553 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 14efca635be3,43553,1733093329005 2024-12-01T22:48:50,355 DEBUG [RS:0;14efca635be3:42517 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T22:48:50,355 DEBUG [RS:0;14efca635be3:42517 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 14efca635be3,42517,1733093328877 2024-12-01T22:48:50,355 DEBUG [RS:2;14efca635be3:43553 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '14efca635be3,43553,1733093329005' 2024-12-01T22:48:50,356 DEBUG [RS:0;14efca635be3:42517 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '14efca635be3,42517,1733093328877' 2024-12-01T22:48:50,356 DEBUG [RS:2;14efca635be3:43553 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T22:48:50,356 DEBUG [RS:0;14efca635be3:42517 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T22:48:50,356 DEBUG [RS:2;14efca635be3:43553 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-01T22:48:50,356 DEBUG [RS:0;14efca635be3:42517 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-01T22:48:50,357 DEBUG [RS:2;14efca635be3:43553 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T22:48:50,357 DEBUG [RS:0;14efca635be3:42517 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T22:48:50,357 INFO [RS:0;14efca635be3:42517 {}] 
quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-01T22:48:50,357 INFO [RS:2;14efca635be3:43553 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-01T22:48:50,357 INFO [RS:0;14efca635be3:42517 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-01T22:48:50,357 INFO [RS:2;14efca635be3:43553 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-01T22:48:50,379 WARN [14efca635be3:38179 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-01T22:48:50,462 INFO [RS:0;14efca635be3:42517 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-01T22:48:50,462 INFO [RS:2;14efca635be3:43553 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-01T22:48:50,462 INFO [RS:1;14efca635be3:32909 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-01T22:48:50,466 INFO [RS:0;14efca635be3:42517 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=14efca635be3%2C42517%2C1733093328877, suffix=, logDir=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/WALs/14efca635be3,42517,1733093328877, archiveDir=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/oldWALs, maxLogs=32 2024-12-01T22:48:50,466 INFO [RS:1;14efca635be3:32909 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=14efca635be3%2C32909%2C1733093328960, suffix=, logDir=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/WALs/14efca635be3,32909,1733093328960, archiveDir=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/oldWALs, maxLogs=32 2024-12-01T22:48:50,466 INFO [RS:2;14efca635be3:43553 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=14efca635be3%2C43553%2C1733093329005, suffix=, logDir=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/WALs/14efca635be3,43553,1733093329005, archiveDir=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/oldWALs, maxLogs=32 2024-12-01T22:48:50,483 DEBUG [RS:0;14efca635be3:42517 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/WALs/14efca635be3,42517,1733093328877/14efca635be3%2C42517%2C1733093328877.1733093330471, exclude list is [], retry=0 2024-12-01T22:48:50,488 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43459,DS-c7ebebbd-f83b-46e4-a67d-a453025140ef,DISK] 2024-12-01T22:48:50,488 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37053,DS-fcd3638e-7492-409f-a956-f5c896f3c2c8,DISK] 2024-12-01T22:48:50,488 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:34251,DS-a35ca45b-b965-4093-a799-b50ab4ca2aeb,DISK] 2024-12-01T22:48:50,490 DEBUG [RS:1;14efca635be3:32909 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/WALs/14efca635be3,32909,1733093328960/14efca635be3%2C32909%2C1733093328960.1733093330471, exclude list is [], retry=0 2024-12-01T22:48:50,490 DEBUG [RS:2;14efca635be3:43553 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/WALs/14efca635be3,43553,1733093329005/14efca635be3%2C43553%2C1733093329005.1733093330471, exclude list is [], retry=0 2024-12-01T22:48:50,522 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43459,DS-c7ebebbd-f83b-46e4-a67d-a453025140ef,DISK] 2024-12-01T22:48:50,523 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34251,DS-a35ca45b-b965-4093-a799-b50ab4ca2aeb,DISK] 2024-12-01T22:48:50,523 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37053,DS-fcd3638e-7492-409f-a956-f5c896f3c2c8,DISK] 2024-12-01T22:48:50,524 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34251,DS-a35ca45b-b965-4093-a799-b50ab4ca2aeb,DISK] 2024-12-01T22:48:50,524 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43459,DS-c7ebebbd-f83b-46e4-a67d-a453025140ef,DISK] 2024-12-01T22:48:50,525 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37053,DS-fcd3638e-7492-409f-a956-f5c896f3c2c8,DISK] 2024-12-01T22:48:50,527 INFO [RS:0;14efca635be3:42517 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/WALs/14efca635be3,42517,1733093328877/14efca635be3%2C42517%2C1733093328877.1733093330471 2024-12-01T22:48:50,533 DEBUG [RS:0;14efca635be3:42517 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39341:39341),(127.0.0.1/127.0.0.1:38871:38871),(127.0.0.1/127.0.0.1:34957:34957)] 2024-12-01T22:48:50,535 INFO [RS:1;14efca635be3:32909 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/WALs/14efca635be3,32909,1733093328960/14efca635be3%2C32909%2C1733093328960.1733093330471 2024-12-01T22:48:50,535 INFO [RS:2;14efca635be3:43553 {}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/WALs/14efca635be3,43553,1733093329005/14efca635be3%2C43553%2C1733093329005.1733093330471 2024-12-01T22:48:50,535 DEBUG [RS:1;14efca635be3:32909 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39341:39341),(127.0.0.1/127.0.0.1:34957:34957),(127.0.0.1/127.0.0.1:38871:38871)] 2024-12-01T22:48:50,535 DEBUG [RS:2;14efca635be3:43553 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34957:34957),(127.0.0.1/127.0.0.1:39341:39341),(127.0.0.1/127.0.0.1:38871:38871)] 2024-12-01T22:48:50,634 DEBUG [14efca635be3:38179 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-01T22:48:50,644 DEBUG [14efca635be3:38179 {}] balancer.BalancerClusterState(204): Hosts are {14efca635be3=0} racks are {/default-rack=0} 2024-12-01T22:48:50,651 DEBUG [14efca635be3:38179 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T22:48:50,651 DEBUG [14efca635be3:38179 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T22:48:50,651 DEBUG [14efca635be3:38179 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-01T22:48:50,652 DEBUG [14efca635be3:38179 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T22:48:50,652 DEBUG [14efca635be3:38179 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T22:48:50,652 DEBUG [14efca635be3:38179 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-01T22:48:50,652 INFO [14efca635be3:38179 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T22:48:50,652 INFO [14efca635be3:38179 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T22:48:50,652 INFO [14efca635be3:38179 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-01T22:48:50,652 DEBUG [14efca635be3:38179 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T22:48:50,658 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=14efca635be3,43553,1733093329005 2024-12-01T22:48:50,664 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 14efca635be3,43553,1733093329005, state=OPENING 2024-12-01T22:48:50,734 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-01T22:48:50,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42517-0x10193157dd90001, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:50,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32909-0x10193157dd90002, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:50,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43553-0x10193157dd90003, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:50,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-01T22:48:50,760 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T22:48:50,760 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T22:48:50,761 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T22:48:50,761 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T22:48:50,763 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-01T22:48:50,765 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=14efca635be3,43553,1733093329005}] 2024-12-01T22:48:50,941 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-01T22:48:50,942 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34515, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-01T22:48:50,954 INFO [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-01T22:48:50,955 INFO [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-01T22:48:50,956 INFO [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-01T22:48:50,959 INFO [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=14efca635be3%2C43553%2C1733093329005.meta, suffix=.meta, logDir=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/WALs/14efca635be3,43553,1733093329005, archiveDir=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/oldWALs, maxLogs=32 2024-12-01T22:48:50,973 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/WALs/14efca635be3,43553,1733093329005/14efca635be3%2C43553%2C1733093329005.meta.1733093330961.meta, exclude list is [], retry=0 2024-12-01T22:48:50,977 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34251,DS-a35ca45b-b965-4093-a799-b50ab4ca2aeb,DISK] 2024-12-01T22:48:50,977 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration 
for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43459,DS-c7ebebbd-f83b-46e4-a67d-a453025140ef,DISK] 2024-12-01T22:48:50,977 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37053,DS-fcd3638e-7492-409f-a956-f5c896f3c2c8,DISK] 2024-12-01T22:48:50,981 INFO [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/WALs/14efca635be3,43553,1733093329005/14efca635be3%2C43553%2C1733093329005.meta.1733093330961.meta 2024-12-01T22:48:50,981 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34957:34957),(127.0.0.1/127.0.0.1:38871:38871),(127.0.0.1/127.0.0.1:39341:39341)] 2024-12-01T22:48:50,981 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-01T22:48:50,983 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-01T22:48:50,985 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-01T22:48:50,989 INFO [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
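The MultiRowMutationEndpoint coprocessor loaded above is attached to hbase:meta via its table descriptor ("Loaded coprocessor ... from HTD of hbase:meta"). Purely as a hedged sketch of how a coprocessor class name ends up in a table descriptor — the table name below is illustrative — the client-side declaration looks roughly like:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorDescriptorExample {
  static TableDescriptor buildDescriptor() throws IOException {
    // Registering a coprocessor by class name; on open, the region server logs
    // "Loading coprocessor class ..." and "Loaded coprocessor ... from HTD of <table>".
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();
  }
}
```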
2024-12-01T22:48:50,993 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-01T22:48:50,994 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T22:48:50,994 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-01T22:48:50,994 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-01T22:48:50,998 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-01T22:48:50,999 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-01T22:48:51,000 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T22:48:51,001 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T22:48:51,001 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-01T22:48:51,002 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-01T22:48:51,002 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T22:48:51,003 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T22:48:51,003 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-01T22:48:51,005 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-01T22:48:51,005 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T22:48:51,006 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T22:48:51,006 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-01T22:48:51,007 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-01T22:48:51,008 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T22:48:51,008 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
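The CompactionConfiguration entries printed for each store above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000, off-peak ratio 5.000000) are derived from the standard compaction settings. As a sketch of the corresponding configuration knobs only, using the same values the log reports rather than recommending them:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // These settings correspond to the values printed by CompactionConfiguration above.
    conf.setInt("hbase.hstore.compaction.min", 3);                 // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);          // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);  // off-peak ratio
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}
```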
2024-12-01T22:48:51,009 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-01T22:48:51,010 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/hbase/meta/1588230740 2024-12-01T22:48:51,013 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/hbase/meta/1588230740 2024-12-01T22:48:51,015 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-01T22:48:51,015 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-01T22:48:51,016 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-01T22:48:51,018 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-01T22:48:51,019 INFO [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73924665, jitterRate=0.10156334936618805}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T22:48:51,019 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-01T22:48:51,021 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733093330995Writing region info on filesystem at 1733093330995Initializing all the Stores at 1733093330997 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733093330997Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733093330997Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733093330997Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733093330997Cleaning up temporary data from old regions at 1733093331015 (+18 ms)Running coprocessor post-open hooks at 1733093331020 (+5 ms)Region opened successfully at 1733093331021 (+1 ms) 2024-12-01T22:48:51,028 INFO [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733093330930 2024-12-01T22:48:51,056 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-01T22:48:51,057 INFO [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-01T22:48:51,059 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=14efca635be3,43553,1733093329005 2024-12-01T22:48:51,061 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 14efca635be3,43553,1733093329005, state=OPEN 2024-12-01T22:48:51,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32909-0x10193157dd90002, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T22:48:51,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T22:48:51,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43553-0x10193157dd90003, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T22:48:51,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42517-0x10193157dd90001, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T22:48:51,115 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T22:48:51,115 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T22:48:51,115 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T22:48:51,115 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T22:48:51,116 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=14efca635be3,43553,1733093329005 2024-12-01T22:48:51,124 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-01T22:48:51,124 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=14efca635be3,43553,1733093329005 in 352 msec 2024-12-01T22:48:51,130 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-01T22:48:51,130 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 910 msec 2024-12-01T22:48:51,132 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-01T22:48:51,132 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-01T22:48:51,148 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T22:48:51,149 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=14efca635be3,43553,1733093329005, seqNum=-1] 2024-12-01T22:48:51,165 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T22:48:51,168 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35865, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T22:48:51,186 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1870 sec 2024-12-01T22:48:51,186 INFO [master/14efca635be3:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733093331186, completionTime=-1 2024-12-01T22:48:51,188 INFO [master/14efca635be3:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-01T22:48:51,189 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
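InitMetaProcedure above creates the built-in 'default' and 'hbase' namespaces as its final step. User namespaces are created through the same path via the Admin API; a hedged sketch (the namespace name "example_ns" is illustrative):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Creates a user namespace alongside the built-in 'default' and 'hbase' namespaces.
      admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println(ns.getName());
      }
    }
  }
}
```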
2024-12-01T22:48:51,215 INFO [master/14efca635be3:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-01T22:48:51,215 INFO [master/14efca635be3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733093391215 2024-12-01T22:48:51,215 INFO [master/14efca635be3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733093451215 2024-12-01T22:48:51,215 INFO [master/14efca635be3:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 26 msec 2024-12-01T22:48:51,216 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-01T22:48:51,224 INFO [master/14efca635be3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=14efca635be3,38179,1733093328163-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:51,224 INFO [master/14efca635be3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=14efca635be3,38179,1733093328163-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:51,225 INFO [master/14efca635be3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=14efca635be3,38179,1733093328163-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:51,226 INFO [master/14efca635be3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-14efca635be3:38179, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:51,227 INFO [master/14efca635be3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:51,227 INFO [master/14efca635be3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:51,233 DEBUG [master/14efca635be3:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-01T22:48:51,258 INFO [master/14efca635be3:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.152sec 2024-12-01T22:48:51,259 INFO [master/14efca635be3:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-01T22:48:51,261 INFO [master/14efca635be3:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-01T22:48:51,261 INFO [master/14efca635be3:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-01T22:48:51,262 INFO [master/14efca635be3:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-01T22:48:51,262 INFO [master/14efca635be3:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-01T22:48:51,263 INFO [master/14efca635be3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=14efca635be3,38179,1733093328163-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T22:48:51,263 INFO [master/14efca635be3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=14efca635be3,38179,1733093328163-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-01T22:48:51,267 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-01T22:48:51,268 INFO [master/14efca635be3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-01T22:48:51,269 INFO [master/14efca635be3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=14efca635be3,38179,1733093328163-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:51,362 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ec3e598, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T22:48:51,366 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-01T22:48:51,366 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-01T22:48:51,369 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 14efca635be3,38179,-1 for getting cluster id 2024-12-01T22:48:51,371 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T22:48:51,379 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a605e7bb-df38-4899-ada9-3f48a58472a4' 2024-12-01T22:48:51,381 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T22:48:51,381 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a605e7bb-df38-4899-ada9-3f48a58472a4" 2024-12-01T22:48:51,382 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53d7b575, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T22:48:51,382 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [14efca635be3,38179,-1] 2024-12-01T22:48:51,385 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T22:48:51,387 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T22:48:51,387 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51562, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
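The ClusterIdFetcher / ConnectionRegistry exchanges above are what a client performs while building a connection: it asks the registry for the cluster id, then for the meta location and the active master. A hedged sketch of the client side using the standard HBase client API (the printed cluster id is just for illustration):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClientConnectionExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Building the connection triggers the cluster-id and registry lookups seen in the log.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
    }
  }
}
```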
2024-12-01T22:48:51,390 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6daae659, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T22:48:51,391 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T22:48:51,397 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=14efca635be3,43553,1733093329005, seqNum=-1] 2024-12-01T22:48:51,398 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T22:48:51,400 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57588, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T22:48:51,426 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=14efca635be3,38179,1733093328163 2024-12-01T22:48:51,431 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-01T22:48:51,435 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 14efca635be3,38179,1733093328163 2024-12-01T22:48:51,438 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@118b3aa5 2024-12-01T22:48:51,439 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-01T22:48:51,441 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51572, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-01T22:48:51,446 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38179 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T22:48:51,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38179 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-01T22:48:51,457 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T22:48:51,459 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38179 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-01T22:48:51,459 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T22:48:51,462 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T22:48:51,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38179 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-01T22:48:51,470 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:51,470 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:51,473 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-140738658_22 at /127.0.0.1:36088 [Receiving block BP-1415834449-172.17.0.2-1733093323724:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:43459:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36088 dst: /127.0.0.1:43459 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T22:48:51,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43459 is added to blk_-9223372036854775680_1021 (size=392) 2024-12-01T22:48:51,482 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-01T22:48:51,485 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 84885a86647a3990efbf3fd1bf91f669, NAME => 'TestHBaseWalOnEC,,1733093331443.84885a86647a3990efbf3fd1bf91f669.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0 2024-12-01T22:48:51,492 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:51,492 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:51,498 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-140738658_22 at /127.0.0.1:36104 [Receiving block BP-1415834449-172.17.0.2-1733093323724:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:43459:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36104 dst: /127.0.0.1:43459 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T22:48:51,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43459 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-01T22:48:51,506 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
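Both parity-allocation warnings above come from the RS-3-2-1024k erasure coding policy, which needs 3 data plus 2 parity blocks (5 datanodes) while this mini cluster only has 3; that is why the log suggests running `hdfs ec -verifyClusterSetup`. As a hedged sketch of the programmatic equivalent — the directory path below is illustrative, not taken from this log — the HDFS client API for enabling, applying, and inspecting an EC policy looks roughly like:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcPolicyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      Path dir = new Path("/hbase-wals");   // illustrative path

      // RS-3-2-1024k is not enabled by default, so enable it before applying it.
      dfs.enableErasureCodingPolicy("RS-3-2-1024k");
      dfs.setErasureCodingPolicy(dir, "RS-3-2-1024k");

      // With fewer than 5 datanodes, writes under this policy degrade as in the warnings above.
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
      System.out.println("data units=" + policy.getNumDataUnits()
          + ", parity units=" + policy.getNumParityUnits());
    }
  }
}
```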
2024-12-01T22:48:51,506 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733093331443.84885a86647a3990efbf3fd1bf91f669.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T22:48:51,506 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 84885a86647a3990efbf3fd1bf91f669, disabling compactions & flushes 2024-12-01T22:48:51,506 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733093331443.84885a86647a3990efbf3fd1bf91f669. 2024-12-01T22:48:51,506 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733093331443.84885a86647a3990efbf3fd1bf91f669. 2024-12-01T22:48:51,507 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733093331443.84885a86647a3990efbf3fd1bf91f669. after waiting 0 ms 2024-12-01T22:48:51,507 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733093331443.84885a86647a3990efbf3fd1bf91f669. 2024-12-01T22:48:51,507 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733093331443.84885a86647a3990efbf3fd1bf91f669. 2024-12-01T22:48:51,507 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 84885a86647a3990efbf3fd1bf91f669: Waiting for close lock at 1733093331506Disabling compacts and flushes for region at 1733093331506Disabling writes for close at 1733093331507 (+1 ms)Writing region close event to WAL at 1733093331507Closed at 1733093331507 2024-12-01T22:48:51,509 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T22:48:51,514 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733093331443.84885a86647a3990efbf3fd1bf91f669.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733093331509"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733093331509"}]},"ts":"1733093331509"} 2024-12-01T22:48:51,518 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-01T22:48:51,520 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T22:48:51,523 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733093331520"}]},"ts":"1733093331520"} 2024-12-01T22:48:51,527 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-01T22:48:51,528 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {14efca635be3=0} racks are {/default-rack=0} 2024-12-01T22:48:51,529 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T22:48:51,529 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T22:48:51,529 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-01T22:48:51,529 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T22:48:51,529 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T22:48:51,529 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-01T22:48:51,529 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T22:48:51,529 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T22:48:51,529 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-01T22:48:51,529 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T22:48:51,531 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=84885a86647a3990efbf3fd1bf91f669, ASSIGN}] 2024-12-01T22:48:51,534 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=84885a86647a3990efbf3fd1bf91f669, ASSIGN 2024-12-01T22:48:51,536 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=84885a86647a3990efbf3fd1bf91f669, ASSIGN; state=OFFLINE, location=14efca635be3,42517,1733093328877; forceNewPlan=false, retain=false 2024-12-01T22:48:51,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38179 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-01T22:48:51,690 INFO [14efca635be3:38179 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-01T22:48:51,691 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=84885a86647a3990efbf3fd1bf91f669, regionState=OPENING, regionLocation=14efca635be3,42517,1733093328877 2024-12-01T22:48:51,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=84885a86647a3990efbf3fd1bf91f669, ASSIGN because future has completed 2024-12-01T22:48:51,704 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 84885a86647a3990efbf3fd1bf91f669, server=14efca635be3,42517,1733093328877}] 2024-12-01T22:48:51,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38179 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-01T22:48:51,858 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-01T22:48:51,860 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48083, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-01T22:48:51,868 INFO [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733093331443.84885a86647a3990efbf3fd1bf91f669. 2024-12-01T22:48:51,868 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 84885a86647a3990efbf3fd1bf91f669, NAME => 'TestHBaseWalOnEC,,1733093331443.84885a86647a3990efbf3fd1bf91f669.', STARTKEY => '', ENDKEY => ''} 2024-12-01T22:48:51,869 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 84885a86647a3990efbf3fd1bf91f669 2024-12-01T22:48:51,869 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733093331443.84885a86647a3990efbf3fd1bf91f669.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T22:48:51,869 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 84885a86647a3990efbf3fd1bf91f669 2024-12-01T22:48:51,869 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 84885a86647a3990efbf3fd1bf91f669 2024-12-01T22:48:51,872 INFO [StoreOpener-84885a86647a3990efbf3fd1bf91f669-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 84885a86647a3990efbf3fd1bf91f669 2024-12-01T22:48:51,875 INFO [StoreOpener-84885a86647a3990efbf3fd1bf91f669-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 84885a86647a3990efbf3fd1bf91f669 columnFamilyName cf 2024-12-01T22:48:51,875 DEBUG [StoreOpener-84885a86647a3990efbf3fd1bf91f669-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T22:48:51,876 INFO [StoreOpener-84885a86647a3990efbf3fd1bf91f669-1 {}] regionserver.HStore(327): Store=84885a86647a3990efbf3fd1bf91f669/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T22:48:51,876 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 84885a86647a3990efbf3fd1bf91f669 2024-12-01T22:48:51,877 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/default/TestHBaseWalOnEC/84885a86647a3990efbf3fd1bf91f669 2024-12-01T22:48:51,878 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/default/TestHBaseWalOnEC/84885a86647a3990efbf3fd1bf91f669 2024-12-01T22:48:51,878 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 84885a86647a3990efbf3fd1bf91f669 2024-12-01T22:48:51,878 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 84885a86647a3990efbf3fd1bf91f669 2024-12-01T22:48:51,881 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 84885a86647a3990efbf3fd1bf91f669 2024-12-01T22:48:51,890 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/default/TestHBaseWalOnEC/84885a86647a3990efbf3fd1bf91f669/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T22:48:51,890 INFO [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 84885a86647a3990efbf3fd1bf91f669; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75028039, jitterRate=0.11800490319728851}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T22:48:51,891 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 84885a86647a3990efbf3fd1bf91f669 2024-12-01T22:48:51,891 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 84885a86647a3990efbf3fd1bf91f669: Running coprocessor pre-open hook at 1733093331870Writing region info on filesystem at 1733093331870Initializing all the Stores at 1733093331872 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733093331872Cleaning up temporary data from old regions at 1733093331878 (+6 ms)Running coprocessor post-open hooks at 1733093331891 (+13 ms)Region opened successfully at 1733093331891 2024-12-01T22:48:51,893 INFO [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733093331443.84885a86647a3990efbf3fd1bf91f669., pid=6, masterSystemTime=1733093331857 2024-12-01T22:48:51,896 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733093331443.84885a86647a3990efbf3fd1bf91f669. 2024-12-01T22:48:51,896 INFO [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733093331443.84885a86647a3990efbf3fd1bf91f669. 2024-12-01T22:48:51,897 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=84885a86647a3990efbf3fd1bf91f669, regionState=OPEN, openSeqNum=2, regionLocation=14efca635be3,42517,1733093328877 2024-12-01T22:48:51,901 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 84885a86647a3990efbf3fd1bf91f669, server=14efca635be3,42517,1733093328877 because future has completed 2024-12-01T22:48:51,906 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-01T22:48:51,907 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 84885a86647a3990efbf3fd1bf91f669, server=14efca635be3,42517,1733093328877 in 200 msec 2024-12-01T22:48:51,910 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-01T22:48:51,910 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=84885a86647a3990efbf3fd1bf91f669, ASSIGN in 376 msec 2024-12-01T22:48:51,912 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T22:48:51,912 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733093331912"}]},"ts":"1733093331912"} 2024-12-01T22:48:51,915 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-01T22:48:51,917 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): 
pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T22:48:51,921 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 468 msec 2024-12-01T22:48:52,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38179 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-01T22:48:52,096 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-01T22:48:52,096 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-01T22:48:52,098 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T22:48:52,105 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-01T22:48:52,106 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T22:48:52,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-01T22:48:52,115 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733093331443.84885a86647a3990efbf3fd1bf91f669., hostname=14efca635be3,42517,1733093328877, seqNum=2] 2024-12-01T22:48:52,117 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T22:48:52,119 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35590, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T22:48:52,127 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38179 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-01T22:48:52,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38179 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-01T22:48:52,134 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-01T22:48:52,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38179 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-01T22:48:52,136 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-01T22:48:52,137 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-01T22:48:52,245 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38179 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-01T22:48:52,306 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42517 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-01T22:48:52,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733093331443.84885a86647a3990efbf3fd1bf91f669. 2024-12-01T22:48:52,310 INFO [RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 84885a86647a3990efbf3fd1bf91f669 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-01T22:48:52,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/default/TestHBaseWalOnEC/84885a86647a3990efbf3fd1bf91f669/.tmp/cf/2a1c4b6dfe7649a8bd3a42f9c00857fe is 36, key is row/cf:cq/1733093332119/Put/seqid=0 2024-12-01T22:48:52,363 WARN [RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:52,363 WARN [RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:52,367 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1205689971_22 at /127.0.0.1:34624 [Receiving block BP-1415834449-172.17.0.2-1733093323724:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:34251:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34624 dst: /127.0.0.1:34251 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T22:48:52,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34251 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-01T22:48:52,372 WARN [RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-01T22:48:52,372 INFO [RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/default/TestHBaseWalOnEC/84885a86647a3990efbf3fd1bf91f669/.tmp/cf/2a1c4b6dfe7649a8bd3a42f9c00857fe 2024-12-01T22:48:52,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/default/TestHBaseWalOnEC/84885a86647a3990efbf3fd1bf91f669/.tmp/cf/2a1c4b6dfe7649a8bd3a42f9c00857fe as hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/default/TestHBaseWalOnEC/84885a86647a3990efbf3fd1bf91f669/cf/2a1c4b6dfe7649a8bd3a42f9c00857fe 2024-12-01T22:48:52,424 INFO [RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/default/TestHBaseWalOnEC/84885a86647a3990efbf3fd1bf91f669/cf/2a1c4b6dfe7649a8bd3a42f9c00857fe, entries=1, sequenceid=5, filesize=4.7 K 2024-12-01T22:48:52,433 INFO [RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 84885a86647a3990efbf3fd1bf91f669 in 121ms, sequenceid=5, compaction requested=false 2024-12-01T22:48:52,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-01T22:48:52,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 84885a86647a3990efbf3fd1bf91f669: 2024-12-01T22:48:52,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733093331443.84885a86647a3990efbf3fd1bf91f669. 
2024-12-01T22:48:52,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-01T22:48:52,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38179 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-01T22:48:52,446 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-01T22:48:52,446 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 306 msec 2024-12-01T22:48:52,449 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 319 msec 2024-12-01T22:48:52,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38179 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-01T22:48:52,455 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-01T22:48:52,471 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-01T22:48:52,471 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-01T22:48:52,471 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T22:48:52,476 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T22:48:52,477 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T22:48:52,477 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T22:48:52,477 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-01T22:48:52,477 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1099508225, stopped=false 2024-12-01T22:48:52,477 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=14efca635be3,38179,1733093328163 2024-12-01T22:48:52,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T22:48:52,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43553-0x10193157dd90003, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T22:48:52,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42517-0x10193157dd90001, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T22:48:52,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32909-0x10193157dd90002, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T22:48:52,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:52,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43553-0x10193157dd90003, 
quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:52,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42517-0x10193157dd90001, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:52,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32909-0x10193157dd90002, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:52,557 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-01T22:48:52,558 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-01T22:48:52,558 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) 
at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T22:48:52,559 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T22:48:52,559 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43553-0x10193157dd90003, quorum=127.0.0.1:55917, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T22:48:52,559 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T22:48:52,559 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '14efca635be3,42517,1733093328877' ***** 2024-12-01T22:48:52,560 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42517-0x10193157dd90001, quorum=127.0.0.1:55917, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T22:48:52,560 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-01T22:48:52,560 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '14efca635be3,32909,1733093328960' ***** 2024-12-01T22:48:52,560 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-01T22:48:52,560 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:32909-0x10193157dd90002, quorum=127.0.0.1:55917, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T22:48:52,560 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '14efca635be3,43553,1733093329005' ***** 2024-12-01T22:48:52,560 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-01T22:48:52,560 INFO [RS:0;14efca635be3:42517 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T22:48:52,560 INFO [RS:1;14efca635be3:32909 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T22:48:52,561 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-01T22:48:52,561 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-01T22:48:52,561 INFO [RS:1;14efca635be3:32909 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-12-01T22:48:52,561 INFO [RS:0;14efca635be3:42517 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-01T22:48:52,561 INFO [RS:1;14efca635be3:32909 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-01T22:48:52,561 INFO [RS:0;14efca635be3:42517 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-01T22:48:52,562 INFO [RS:1;14efca635be3:32909 {}] regionserver.HRegionServer(959): stopping server 14efca635be3,32909,1733093328960 2024-12-01T22:48:52,562 INFO [RS:1;14efca635be3:32909 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T22:48:52,562 INFO [RS:0;14efca635be3:42517 {}] regionserver.HRegionServer(3091): Received CLOSE for 84885a86647a3990efbf3fd1bf91f669 2024-12-01T22:48:52,562 INFO [RS:2;14efca635be3:43553 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T22:48:52,562 INFO [RS:1;14efca635be3:32909 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;14efca635be3:32909. 2024-12-01T22:48:52,562 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-01T22:48:52,562 INFO [RS:2;14efca635be3:43553 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-01T22:48:52,562 DEBUG [RS:1;14efca635be3:32909 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T22:48:52,562 INFO [RS:2;14efca635be3:43553 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-01T22:48:52,562 DEBUG [RS:1;14efca635be3:32909 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T22:48:52,562 INFO [RS:2;14efca635be3:43553 {}] regionserver.HRegionServer(959): stopping server 14efca635be3,43553,1733093329005 2024-12-01T22:48:52,562 INFO [RS:2;14efca635be3:43553 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T22:48:52,562 INFO [RS:2;14efca635be3:43553 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;14efca635be3:43553. 
2024-12-01T22:48:52,563 INFO [RS:1;14efca635be3:32909 {}] regionserver.HRegionServer(976): stopping server 14efca635be3,32909,1733093328960; all regions closed. 2024-12-01T22:48:52,563 DEBUG [RS:2;14efca635be3:43553 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T22:48:52,563 DEBUG [RS:2;14efca635be3:43553 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T22:48:52,563 INFO [RS:0;14efca635be3:42517 {}] regionserver.HRegionServer(959): stopping server 14efca635be3,42517,1733093328877 2024-12-01T22:48:52,563 INFO [RS:0;14efca635be3:42517 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T22:48:52,563 INFO [RS:2;14efca635be3:43553 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T22:48:52,563 INFO [RS:0;14efca635be3:42517 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;14efca635be3:42517. 2024-12-01T22:48:52,563 INFO [RS:2;14efca635be3:43553 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-01T22:48:52,563 INFO [RS:2;14efca635be3:43553 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-01T22:48:52,563 DEBUG [RS:0;14efca635be3:42517 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T22:48:52,563 INFO [RS:2;14efca635be3:43553 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-01T22:48:52,563 DEBUG [RS:0;14efca635be3:42517 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T22:48:52,563 DEBUG [RS_CLOSE_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 84885a86647a3990efbf3fd1bf91f669, disabling compactions & flushes 2024-12-01T22:48:52,564 INFO [RS_CLOSE_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733093331443.84885a86647a3990efbf3fd1bf91f669. 2024-12-01T22:48:52,564 INFO [RS:2;14efca635be3:43553 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-01T22:48:52,564 DEBUG [RS_CLOSE_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733093331443.84885a86647a3990efbf3fd1bf91f669. 2024-12-01T22:48:52,564 INFO [RS:0;14efca635be3:42517 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-01T22:48:52,564 DEBUG [RS:2;14efca635be3:43553 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-01T22:48:52,564 DEBUG [RS_CLOSE_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733093331443.84885a86647a3990efbf3fd1bf91f669. after waiting 0 ms 2024-12-01T22:48:52,564 DEBUG [RS:0;14efca635be3:42517 {}] regionserver.HRegionServer(1325): Online Regions={84885a86647a3990efbf3fd1bf91f669=TestHBaseWalOnEC,,1733093331443.84885a86647a3990efbf3fd1bf91f669.} 2024-12-01T22:48:52,564 DEBUG [RS_CLOSE_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733093331443.84885a86647a3990efbf3fd1bf91f669. 
2024-12-01T22:48:52,565 DEBUG [RS:2;14efca635be3:43553 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-01T22:48:52,565 DEBUG [RS:0;14efca635be3:42517 {}] regionserver.HRegionServer(1351): Waiting on 84885a86647a3990efbf3fd1bf91f669 2024-12-01T22:48:52,565 DEBUG [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-01T22:48:52,565 INFO [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-01T22:48:52,565 DEBUG [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-01T22:48:52,565 DEBUG [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T22:48:52,565 DEBUG [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-01T22:48:52,566 INFO [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-01T22:48:52,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34251 is added to blk_1073741827_1017 (size=93) 2024-12-01T22:48:52,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43459 is added to blk_1073741827_1017 (size=93) 2024-12-01T22:48:52,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37053 is added to blk_1073741827_1017 (size=93) 2024-12-01T22:48:52,577 DEBUG [RS:1;14efca635be3:32909 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/oldWALs 2024-12-01T22:48:52,577 INFO [RS:1;14efca635be3:32909 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 14efca635be3%2C32909%2C1733093328960:(num 1733093330471) 2024-12-01T22:48:52,577 DEBUG [RS:1;14efca635be3:32909 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T22:48:52,577 INFO [RS:1;14efca635be3:32909 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T22:48:52,578 INFO [RS:1;14efca635be3:32909 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T22:48:52,578 INFO [RS:1;14efca635be3:32909 {}] hbase.ChoreService(370): Chore service for: regionserver/14efca635be3:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-01T22:48:52,578 INFO [RS:1;14efca635be3:32909 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T22:48:52,578 INFO [RS:1;14efca635be3:32909 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-01T22:48:52,578 INFO [regionserver/14efca635be3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-01T22:48:52,578 INFO [RS:1;14efca635be3:32909 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-01T22:48:52,578 INFO [RS:1;14efca635be3:32909 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T22:48:52,578 INFO [RS:1;14efca635be3:32909 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32909 2024-12-01T22:48:52,584 DEBUG [RS_CLOSE_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/default/TestHBaseWalOnEC/84885a86647a3990efbf3fd1bf91f669/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-01T22:48:52,586 INFO [RS_CLOSE_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733093331443.84885a86647a3990efbf3fd1bf91f669. 2024-12-01T22:48:52,586 DEBUG [RS_CLOSE_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 84885a86647a3990efbf3fd1bf91f669: Waiting for close lock at 1733093332563Running coprocessor pre-close hooks at 1733093332563Disabling compacts and flushes for region at 1733093332563Disabling writes for close at 1733093332564 (+1 ms)Writing region close event to WAL at 1733093332567 (+3 ms)Running coprocessor post-close hooks at 1733093332585 (+18 ms)Closed at 1733093332586 (+1 ms) 2024-12-01T22:48:52,587 DEBUG [RS_CLOSE_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733093331443.84885a86647a3990efbf3fd1bf91f669. 2024-12-01T22:48:52,588 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T22:48:52,588 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32909-0x10193157dd90002, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/14efca635be3,32909,1733093328960 2024-12-01T22:48:52,589 INFO [RS:1;14efca635be3:32909 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-01T22:48:52,590 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [14efca635be3,32909,1733093328960] 2024-12-01T22:48:52,600 DEBUG [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/hbase/meta/1588230740/.tmp/info/f0ad459762484113bddc6c28c1220223 is 153, key is TestHBaseWalOnEC,,1733093331443.84885a86647a3990efbf3fd1bf91f669./info:regioninfo/1733093331897/Put/seqid=0 2024-12-01T22:48:52,603 WARN [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:52,603 WARN [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:52,607 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_107215965_22 at /127.0.0.1:34640 [Receiving block BP-1415834449-172.17.0.2-1733093323724:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:34251:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34640 dst: /127.0.0.1:34251 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T22:48:52,607 INFO [regionserver/14efca635be3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T22:48:52,609 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/14efca635be3,32909,1733093328960 already deleted, retry=false 2024-12-01T22:48:52,610 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 14efca635be3,32909,1733093328960 expired; onlineServers=2 2024-12-01T22:48:52,610 INFO [regionserver/14efca635be3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T22:48:52,610 INFO [regionserver/14efca635be3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T22:48:52,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34251 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-01T22:48:52,613 WARN [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-01T22:48:52,613 INFO [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/hbase/meta/1588230740/.tmp/info/f0ad459762484113bddc6c28c1220223 2024-12-01T22:48:52,641 DEBUG [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/hbase/meta/1588230740/.tmp/ns/78fcfc082e454ce28d0b155855080787 is 43, key is default/ns:d/1733093331171/Put/seqid=0 2024-12-01T22:48:52,643 WARN [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:52,644 WARN [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:52,649 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_107215965_22 at /127.0.0.1:48464 [Receiving block BP-1415834449-172.17.0.2-1733093323724:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:37053:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48464 dst: /127.0.0.1:37053 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T22:48:52,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37053 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-01T22:48:52,654 WARN [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-01T22:48:52,655 INFO [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/hbase/meta/1588230740/.tmp/ns/78fcfc082e454ce28d0b155855080787 2024-12-01T22:48:52,691 DEBUG [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/hbase/meta/1588230740/.tmp/table/fcbd6e81c47244d098646a9ac1ad2fd9 is 52, key is TestHBaseWalOnEC/table:state/1733093331912/Put/seqid=0 2024-12-01T22:48:52,693 WARN [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:52,694 WARN [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:52,699 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_107215965_22 at /127.0.0.1:48508 [Receiving block BP-1415834449-172.17.0.2-1733093323724:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:37053:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48508 dst: /127.0.0.1:37053 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-01T22:48:52,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32909-0x10193157dd90002, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T22:48:52,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32909-0x10193157dd90002, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T22:48:52,700 INFO [RS:1;14efca635be3:32909 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-01T22:48:52,700 INFO [RS:1;14efca635be3:32909 {}] regionserver.HRegionServer(1031): Exiting; stopping=14efca635be3,32909,1733093328960; zookeeper connection closed. 2024-12-01T22:48:52,704 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3e5d0e5f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3e5d0e5f 2024-12-01T22:48:52,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37053 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-01T22:48:52,707 WARN [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-01T22:48:52,707 INFO [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/hbase/meta/1588230740/.tmp/table/fcbd6e81c47244d098646a9ac1ad2fd9 2024-12-01T22:48:52,721 DEBUG [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/hbase/meta/1588230740/.tmp/info/f0ad459762484113bddc6c28c1220223 as hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/hbase/meta/1588230740/info/f0ad459762484113bddc6c28c1220223 2024-12-01T22:48:52,732 INFO [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/hbase/meta/1588230740/info/f0ad459762484113bddc6c28c1220223, entries=10, sequenceid=11, filesize=6.5 K 2024-12-01T22:48:52,734 DEBUG [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/hbase/meta/1588230740/.tmp/ns/78fcfc082e454ce28d0b155855080787 as hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/hbase/meta/1588230740/ns/78fcfc082e454ce28d0b155855080787 2024-12-01T22:48:52,745 INFO [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/hbase/meta/1588230740/ns/78fcfc082e454ce28d0b155855080787, entries=2, sequenceid=11, filesize=5.0 K 2024-12-01T22:48:52,746 DEBUG [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/hbase/meta/1588230740/.tmp/table/fcbd6e81c47244d098646a9ac1ad2fd9 as hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/hbase/meta/1588230740/table/fcbd6e81c47244d098646a9ac1ad2fd9 2024-12-01T22:48:52,758 INFO [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/hbase/meta/1588230740/table/fcbd6e81c47244d098646a9ac1ad2fd9, entries=2, sequenceid=11, filesize=5.1 K 2024-12-01T22:48:52,760 INFO [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 195ms, sequenceid=11, compaction requested=false 2024-12-01T22:48:52,761 DEBUG [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-01T22:48:52,765 INFO [RS:0;14efca635be3:42517 {}] regionserver.HRegionServer(976): stopping server 14efca635be3,42517,1733093328877; all regions closed. 2024-12-01T22:48:52,765 DEBUG [RS:2;14efca635be3:43553 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-01T22:48:52,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43459 is added to blk_1073741826_1016 (size=1298) 2024-12-01T22:48:52,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34251 is added to blk_1073741826_1016 (size=1298) 2024-12-01T22:48:52,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37053 is added to blk_1073741826_1016 (size=1298) 2024-12-01T22:48:52,771 DEBUG [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-01T22:48:52,772 DEBUG [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-01T22:48:52,772 INFO [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-01T22:48:52,772 DEBUG [RS:0;14efca635be3:42517 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/oldWALs 2024-12-01T22:48:52,772 INFO [RS:0;14efca635be3:42517 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 14efca635be3%2C42517%2C1733093328877:(num 1733093330471) 2024-12-01T22:48:52,772 DEBUG [RS:0;14efca635be3:42517 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T22:48:52,772 DEBUG [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733093332565Running coprocessor pre-close hooks at 1733093332565Disabling compacts and flushes for region at 1733093332565Disabling writes for close at 1733093332565Obtaining lock to block concurrent updates at 1733093332566 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 
1733093332566Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733093332566Flushing stores of hbase:meta,,1.1588230740 at 1733093332567 (+1 ms)Flushing 1588230740/info: creating writer at 1733093332568 (+1 ms)Flushing 1588230740/info: appending metadata at 1733093332597 (+29 ms)Flushing 1588230740/info: closing flushed file at 1733093332597Flushing 1588230740/ns: creating writer at 1733093332624 (+27 ms)Flushing 1588230740/ns: appending metadata at 1733093332640 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1733093332640Flushing 1588230740/table: creating writer at 1733093332665 (+25 ms)Flushing 1588230740/table: appending metadata at 1733093332689 (+24 ms)Flushing 1588230740/table: closing flushed file at 1733093332689Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f796ebc: reopening flushed file at 1733093332719 (+30 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@51cba720: reopening flushed file at 1733093332733 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@33968d2b: reopening flushed file at 1733093332745 (+12 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 195ms, sequenceid=11, compaction requested=false at 1733093332760 (+15 ms)Writing region close event to WAL at 1733093332762 (+2 ms)Running coprocessor post-close hooks at 1733093332772 (+10 ms)Closed at 1733093332772 2024-12-01T22:48:52,772 INFO [RS:0;14efca635be3:42517 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T22:48:52,772 INFO [RS:0;14efca635be3:42517 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T22:48:52,772 DEBUG [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-01T22:48:52,772 INFO [RS:0;14efca635be3:42517 {}] hbase.ChoreService(370): Chore service for: regionserver/14efca635be3:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-01T22:48:52,773 INFO [RS:0;14efca635be3:42517 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T22:48:52,773 INFO [regionserver/14efca635be3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-01T22:48:52,773 INFO [RS:0;14efca635be3:42517 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-01T22:48:52,773 INFO [RS:0;14efca635be3:42517 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
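Despite the two missing parity blocks per group, each flush above completes: the three data units are written, the HFiles are committed from .tmp into the store ("Added ... entries=10, sequenceid=11, filesize=6.5 K" and so on), and the close journal records the whole flush finishing in 195 ms. The "high risk of losing data" warning therefore indicates reduced redundancy, not a failed write. A small, hypothetical sketch (standard FileSystem APIs; the store file path is copied from the log) that would show only the data-unit hosts actually holding pieces of such a file:

    import java.util.Arrays;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ShowEcBlockHosts {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:40035");
        Path storeFile = new Path("/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/"
            + "data/hbase/meta/1588230740/info/f0ad459762484113bddc6c28c1220223");
        try (FileSystem fs = storeFile.getFileSystem(conf)) {
          FileStatus st = fs.getFileStatus(storeFile);
          // For a striped file each BlockLocation describes a block group; with only three
          // datanodes we would expect at most three hosts per group (no parity copies).
          for (BlockLocation loc : fs.getFileBlockLocations(st, 0, st.getLen())) {
            System.out.println(Arrays.toString(loc.getHosts()));
          }
        }
      }
    }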
2024-12-01T22:48:52,773 INFO [RS:0;14efca635be3:42517 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T22:48:52,773 INFO [RS:0;14efca635be3:42517 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42517 2024-12-01T22:48:52,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T22:48:52,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42517-0x10193157dd90001, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/14efca635be3,42517,1733093328877 2024-12-01T22:48:52,825 INFO [RS:0;14efca635be3:42517 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-01T22:48:52,862 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [14efca635be3,42517,1733093328877] 2024-12-01T22:48:52,873 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/14efca635be3,42517,1733093328877 already deleted, retry=false 2024-12-01T22:48:52,873 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 14efca635be3,42517,1733093328877 expired; onlineServers=1 2024-12-01T22:48:52,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42517-0x10193157dd90001, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T22:48:52,963 INFO [RS:0;14efca635be3:42517 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-01T22:48:52,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42517-0x10193157dd90001, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T22:48:52,963 INFO [RS:0;14efca635be3:42517 {}] regionserver.HRegionServer(1031): Exiting; stopping=14efca635be3,42517,1733093328877; zookeeper connection closed. 2024-12-01T22:48:52,963 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2694658d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2694658d 2024-12-01T22:48:52,965 INFO [RS:2;14efca635be3:43553 {}] regionserver.HRegionServer(976): stopping server 14efca635be3,43553,1733093329005; all regions closed. 
2024-12-01T22:48:52,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43459 is added to blk_1073741829_1019 (size=2751) 2024-12-01T22:48:52,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37053 is added to blk_1073741829_1019 (size=2751) 2024-12-01T22:48:52,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34251 is added to blk_1073741829_1019 (size=2751) 2024-12-01T22:48:52,972 DEBUG [RS:2;14efca635be3:43553 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/oldWALs 2024-12-01T22:48:52,973 INFO [RS:2;14efca635be3:43553 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 14efca635be3%2C43553%2C1733093329005.meta:.meta(num 1733093330961) 2024-12-01T22:48:52,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34251 is added to blk_1073741828_1018 (size=93) 2024-12-01T22:48:52,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43459 is added to blk_1073741828_1018 (size=93) 2024-12-01T22:48:52,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37053 is added to blk_1073741828_1018 (size=93) 2024-12-01T22:48:52,982 DEBUG [RS:2;14efca635be3:43553 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/oldWALs 2024-12-01T22:48:52,982 INFO [RS:2;14efca635be3:43553 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 14efca635be3%2C43553%2C1733093329005:(num 1733093330471) 2024-12-01T22:48:52,982 DEBUG [RS:2;14efca635be3:43553 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T22:48:52,982 INFO [RS:2;14efca635be3:43553 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T22:48:52,982 INFO [RS:2;14efca635be3:43553 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T22:48:52,982 INFO [RS:2;14efca635be3:43553 {}] hbase.ChoreService(370): Chore service for: regionserver/14efca635be3:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-01T22:48:52,982 INFO [RS:2;14efca635be3:43553 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T22:48:52,982 INFO [regionserver/14efca635be3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-01T22:48:52,983 INFO [RS:2;14efca635be3:43553 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43553 2024-12-01T22:48:52,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T22:48:52,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43553-0x10193157dd90003, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/14efca635be3,43553,1733093329005 2024-12-01T22:48:52,989 INFO [RS:2;14efca635be3:43553 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-01T22:48:52,989 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [14efca635be3,43553,1733093329005] 2024-12-01T22:48:53,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43459 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-01T22:48:53,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37053 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-01T22:48:53,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43459 is added to blk_-9223372036854775741_1008 (size=1189) 2024-12-01T22:48:53,009 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/14efca635be3,43553,1733093329005 already deleted, retry=false 2024-12-01T22:48:53,010 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 14efca635be3,43553,1733093329005 expired; onlineServers=0 2024-12-01T22:48:53,010 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '14efca635be3,38179,1733093328163' ***** 2024-12-01T22:48:53,010 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-01T22:48:53,010 INFO [M:0;14efca635be3:38179 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T22:48:53,010 INFO [M:0;14efca635be3:38179 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T22:48:53,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37053 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-01T22:48:53,011 DEBUG [M:0;14efca635be3:38179 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-01T22:48:53,011 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-01T22:48:53,011 DEBUG [M:0;14efca635be3:38179 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-01T22:48:53,011 INFO [M:0;14efca635be3:38179 {}] hbase.ChoreService(370): Chore service for: master/14efca635be3:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-01T22:48:53,011 INFO [M:0;14efca635be3:38179 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T22:48:53,011 DEBUG [M:0;14efca635be3:38179 {}] master.HMaster(1795): Stopping service threads 2024-12-01T22:48:53,011 INFO [M:0;14efca635be3:38179 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-01T22:48:53,012 INFO [M:0;14efca635be3:38179 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-01T22:48:53,012 DEBUG [master/14efca635be3:0:becomeActiveMaster-HFileCleaner.large.0-1733093330102 {}] cleaner.HFileCleaner(306): Exit Thread[master/14efca635be3:0:becomeActiveMaster-HFileCleaner.large.0-1733093330102,5,FailOnTimeoutGroup] 2024-12-01T22:48:53,012 DEBUG [master/14efca635be3:0:becomeActiveMaster-HFileCleaner.small.0-1733093330108 {}] cleaner.HFileCleaner(306): Exit Thread[master/14efca635be3:0:becomeActiveMaster-HFileCleaner.small.0-1733093330108,5,FailOnTimeoutGroup] 2024-12-01T22:48:53,012 INFO [M:0;14efca635be3:38179 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-01T22:48:53,013 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-01T22:48:53,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-01T22:48:53,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:53,021 DEBUG [M:0;14efca635be3:38179 {}] zookeeper.ZKUtil(347): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-01T22:48:53,021 WARN [M:0;14efca635be3:38179 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-01T22:48:53,022 INFO [M:0;14efca635be3:38179 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/.lastflushedseqids 2024-12-01T22:48:53,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34251 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-01T22:48:53,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37053 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-01T22:48:53,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34251 is added to blk_-9223372036854775709_1013 (size=1321) 2024-12-01T22:48:53,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37053 is added to blk_-9223372036854775708_1013 (size=1321) 2024-12-01T22:48:53,036 WARN [M:0;14efca635be3:38179 {}] hdfs.DFSStripedOutputStream(531): Cannot 
allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:53,036 WARN [M:0;14efca635be3:38179 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:53,045 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-140738658_22 at /127.0.0.1:34680 [Receiving block BP-1415834449-172.17.0.2-1733093323724:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:34251:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34680 dst: /127.0.0.1:34251 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T22:48:53,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34251 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-01T22:48:53,051 WARN [M:0;14efca635be3:38179 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-01T22:48:53,051 INFO [M:0;14efca635be3:38179 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-01T22:48:53,051 INFO [M:0;14efca635be3:38179 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-01T22:48:53,052 DEBUG [M:0;14efca635be3:38179 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-01T22:48:53,052 INFO [M:0;14efca635be3:38179 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T22:48:53,052 DEBUG [M:0;14efca635be3:38179 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-01T22:48:53,052 DEBUG [M:0;14efca635be3:38179 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-01T22:48:53,052 DEBUG [M:0;14efca635be3:38179 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T22:48:53,052 INFO [M:0;14efca635be3:38179 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.83 KB heapSize=34.12 KB 2024-12-01T22:48:53,080 DEBUG [M:0;14efca635be3:38179 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4a5a11251991432baf14b4a0c3416f44 is 82, key is hbase:meta,,1/info:regioninfo/1733093331058/Put/seqid=0 2024-12-01T22:48:53,082 WARN [M:0;14efca635be3:38179 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:53,082 WARN [M:0;14efca635be3:38179 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:53,087 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-140738658_22 at /127.0.0.1:34698 [Receiving block BP-1415834449-172.17.0.2-1733093323724:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:34251:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34698 dst: /127.0.0.1:34251 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T22:48:53,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34251 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-01T22:48:53,096 WARN [M:0;14efca635be3:38179 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. 
It's at high risk of losing data. 2024-12-01T22:48:53,096 INFO [M:0;14efca635be3:38179 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4a5a11251991432baf14b4a0c3416f44 2024-12-01T22:48:53,099 INFO [RS:2;14efca635be3:43553 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-01T22:48:53,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43553-0x10193157dd90003, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T22:48:53,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43553-0x10193157dd90003, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T22:48:53,099 INFO [RS:2;14efca635be3:43553 {}] regionserver.HRegionServer(1031): Exiting; stopping=14efca635be3,43553,1733093329005; zookeeper connection closed. 2024-12-01T22:48:53,100 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@190aee3a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@190aee3a 2024-12-01T22:48:53,100 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-01T22:48:53,127 DEBUG [M:0;14efca635be3:38179 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0f8f48fdff3842cbbfe11a230bf2c944 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733093331920/Put/seqid=0 2024-12-01T22:48:53,130 WARN [M:0;14efca635be3:38179 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:53,130 WARN [M:0;14efca635be3:38179 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:53,133 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-140738658_22 at /127.0.0.1:44786 [Receiving block BP-1415834449-172.17.0.2-1733093323724:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:43459:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44786 dst: /127.0.0.1:43459 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T22:48:53,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43459 is added to blk_-9223372036854775552_1037 (size=6439) 2024-12-01T22:48:53,137 WARN [M:0;14efca635be3:38179 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-01T22:48:53,138 INFO [M:0;14efca635be3:38179 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0f8f48fdff3842cbbfe11a230bf2c944 2024-12-01T22:48:53,180 DEBUG [M:0;14efca635be3:38179 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e470360792e843acbb58a151e3377e84 is 69, key is 14efca635be3,32909,1733093328960/rs:state/1733093330187/Put/seqid=0 2024-12-01T22:48:53,182 WARN [M:0;14efca635be3:38179 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:53,183 WARN [M:0;14efca635be3:38179 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T22:48:53,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37053 is added to blk_-9223372036854775725_1010 (size=34) 2024-12-01T22:48:53,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43459 is added to blk_-9223372036854775724_1010 (size=34) 2024-12-01T22:48:53,193 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-140738658_22 at /127.0.0.1:44814 [Receiving block BP-1415834449-172.17.0.2-1733093323724:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:43459:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44814 dst: /127.0.0.1:43459 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T22:48:53,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43459 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-01T22:48:53,197 WARN [M:0;14efca635be3:38179 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-01T22:48:53,197 INFO [M:0;14efca635be3:38179 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e470360792e843acbb58a151e3377e84 2024-12-01T22:48:53,207 DEBUG [M:0;14efca635be3:38179 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4a5a11251991432baf14b4a0c3416f44 as hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4a5a11251991432baf14b4a0c3416f44 2024-12-01T22:48:53,219 INFO [M:0;14efca635be3:38179 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4a5a11251991432baf14b4a0c3416f44, entries=8, sequenceid=72, filesize=5.5 K 2024-12-01T22:48:53,221 DEBUG [M:0;14efca635be3:38179 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0f8f48fdff3842cbbfe11a230bf2c944 as hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0f8f48fdff3842cbbfe11a230bf2c944 2024-12-01T22:48:53,230 INFO [M:0;14efca635be3:38179 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0f8f48fdff3842cbbfe11a230bf2c944, entries=8, sequenceid=72, filesize=6.3 K 2024-12-01T22:48:53,232 DEBUG [M:0;14efca635be3:38179 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e470360792e843acbb58a151e3377e84 as hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e470360792e843acbb58a151e3377e84 2024-12-01T22:48:53,240 INFO [M:0;14efca635be3:38179 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e470360792e843acbb58a151e3377e84, entries=3, sequenceid=72, filesize=5.2 K 2024-12-01T22:48:53,243 INFO [M:0;14efca635be3:38179 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 190ms, sequenceid=72, compaction requested=false 2024-12-01T22:48:53,245 INFO [M:0;14efca635be3:38179 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T22:48:53,245 DEBUG [M:0;14efca635be3:38179 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733093333052Disabling compacts and flushes for region at 1733093333052Disabling writes for close at 1733093333052Obtaining lock to block concurrent updates at 1733093333052Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733093333052Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27471, getHeapSize=34872, getOffHeapSize=0, getCellsCount=85 at 1733093333053 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733093333054 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733093333054Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733093333079 (+25 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733093333079Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733093333109 (+30 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733093333127 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733093333127Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733093333148 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733093333179 (+31 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733093333179Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@65227936: reopening flushed file at 1733093333205 (+26 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@361f54aa: reopening flushed file at 1733093333219 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@11f4b8f3: reopening flushed file at 1733093333230 (+11 ms)Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 190ms, sequenceid=72, compaction requested=false at 1733093333243 (+13 ms)Writing region close event to WAL at 1733093333245 (+2 ms)Closed at 1733093333245 2024-12-01T22:48:53,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43459 is added to blk_1073741825_1011 (size=32674) 2024-12-01T22:48:53,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37053 is added to blk_1073741825_1011 (size=32674) 2024-12-01T22:48:53,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34251 is added to blk_1073741825_1011 (size=32674) 2024-12-01T22:48:53,250 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-01T22:48:53,250 INFO [M:0;14efca635be3:38179 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
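The same parity-allocation warnings accompany the master's own housekeeping region ("master:store", encoded 1595e783b53d99cd5eef43b6debb2682), whose info/proc/rs families are flushed under the MasterData directory of the same erasure-coded test root. For this test that is intentional coverage; on a real cluster that cannot provide enough datanodes for the policy, such paths would normally stay on replication or have an explicitly set policy removed. A hedged sketch using the Hadoop 3.x admin API (the directory is the MasterData root from this log; whether the CLI variants 'hdfs ec -unsetPolicy' or '-setPolicy ... -replicate' are preferable depends on the Hadoop version in use):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class UnsetEcPolicySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:40035");
        Path masterData = new Path(
            "/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/MasterData");
        try (DistributedFileSystem dfs =
            (DistributedFileSystem) masterData.getFileSystem(conf)) {
          // Removes a policy set directly on this directory; files created afterwards fall
          // back to whatever the parent directories (or plain replication) dictate.
          // Existing files keep their current layout.
          dfs.unsetErasureCodingPolicy(masterData);
        }
      }
    }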
2024-12-01T22:48:53,250 INFO [M:0;14efca635be3:38179 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38179 2024-12-01T22:48:53,251 INFO [M:0;14efca635be3:38179 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-01T22:48:53,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T22:48:53,362 INFO [M:0;14efca635be3:38179 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-01T22:48:53,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38179-0x10193157dd90000, quorum=127.0.0.1:55917, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T22:48:53,382 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e59159d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T22:48:53,385 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T22:48:53,385 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T22:48:53,386 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T22:48:53,386 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/hadoop.log.dir/,STOPPED} 2024-12-01T22:48:53,391 WARN [BP-1415834449-172.17.0.2-1733093323724 heartbeating to localhost/127.0.0.1:40035 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-01T22:48:53,391 WARN [BP-1415834449-172.17.0.2-1733093323724 heartbeating to localhost/127.0.0.1:40035 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1415834449-172.17.0.2-1733093323724 (Datanode Uuid 0ebe535e-2a4a-4998-9d26-8b618c802e37) service to localhost/127.0.0.1:40035 2024-12-01T22:48:53,392 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/cluster_c24864cc-12c7-6f47-9228-80b54c86a323/data/data5/current/BP-1415834449-172.17.0.2-1733093323724 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T22:48:53,393 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/cluster_c24864cc-12c7-6f47-9228-80b54c86a323/data/data6/current/BP-1415834449-172.17.0.2-1733093323724 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T22:48:53,393 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-01T22:48:53,393 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-01T22:48:53,394 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-01T22:48:53,400 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c6b8f01{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T22:48:53,400 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T22:48:53,400 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T22:48:53,401 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T22:48:53,401 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/hadoop.log.dir/,STOPPED} 2024-12-01T22:48:53,404 WARN [BP-1415834449-172.17.0.2-1733093323724 heartbeating to localhost/127.0.0.1:40035 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-01T22:48:53,404 WARN [BP-1415834449-172.17.0.2-1733093323724 heartbeating to localhost/127.0.0.1:40035 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1415834449-172.17.0.2-1733093323724 (Datanode Uuid bb0b79aa-5167-4918-8e04-5f537c070dc7) service to localhost/127.0.0.1:40035 2024-12-01T22:48:53,405 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/cluster_c24864cc-12c7-6f47-9228-80b54c86a323/data/data3/current/BP-1415834449-172.17.0.2-1733093323724 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T22:48:53,405 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/cluster_c24864cc-12c7-6f47-9228-80b54c86a323/data/data4/current/BP-1415834449-172.17.0.2-1733093323724 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T22:48:53,405 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-01T22:48:53,405 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-01T22:48:53,406 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-01T22:48:53,414 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4839957b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T22:48:53,414 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T22:48:53,414 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T22:48:53,415 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T22:48:53,415 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/hadoop.log.dir/,STOPPED} 2024-12-01T22:48:53,417 WARN [BP-1415834449-172.17.0.2-1733093323724 heartbeating to localhost/127.0.0.1:40035 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-01T22:48:53,417 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-01T22:48:53,418 WARN [BP-1415834449-172.17.0.2-1733093323724 heartbeating to localhost/127.0.0.1:40035 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1415834449-172.17.0.2-1733093323724 (Datanode Uuid ee475698-2a10-4d8e-b077-05c7eba9188c) service to localhost/127.0.0.1:40035 2024-12-01T22:48:53,418 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-01T22:48:53,418 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/cluster_c24864cc-12c7-6f47-9228-80b54c86a323/data/data1/current/BP-1415834449-172.17.0.2-1733093323724 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T22:48:53,418 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/cluster_c24864cc-12c7-6f47-9228-80b54c86a323/data/data2/current/BP-1415834449-172.17.0.2-1733093323724 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T22:48:53,419 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-01T22:48:53,431 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e4c45c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-01T22:48:53,432 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T22:48:53,432 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T22:48:53,433 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T22:48:53,433 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/hadoop.log.dir/,STOPPED} 2024-12-01T22:48:53,448 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-01T22:48:53,493 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-01T22:48:53,502 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=86 (was 157), OpenFileDescriptor=439 (was 391) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=308 (was 334), ProcessCount=11 (was 11), AvailableMemoryMB=9455 (was 9748) 2024-12-01T22:48:53,512 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=86, OpenFileDescriptor=439, MaxFileDescriptor=1048576, SystemLoadAverage=308, ProcessCount=11, AvailableMemoryMB=9455 2024-12-01T22:48:53,512 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-01T22:48:53,513 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/hadoop.log.dir so I do NOT create it in target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864 2024-12-01T22:48:53,513 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c8af83e-780d-7561-3d4e-1d65cfe75680/hadoop.tmp.dir so I do NOT create it in target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864 2024-12-01T22:48:53,513 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/cluster_2b2c1e36-2c86-8310-2570-f88700f7ce62, deleteOnExit=true 2024-12-01T22:48:53,513 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-01T22:48:53,514 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/test.cache.data in system properties and HBase conf 2024-12-01T22:48:53,514 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/hadoop.tmp.dir in system properties and HBase conf 2024-12-01T22:48:53,514 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/hadoop.log.dir in system properties and HBase conf 2024-12-01T22:48:53,514 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-01T22:48:53,514 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-01T22:48:53,514 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-01T22:48:53,514 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-01T22:48:53,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-01T22:48:53,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-01T22:48:53,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-01T22:48:53,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-01T22:48:53,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-01T22:48:53,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-01T22:48:53,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-01T22:48:53,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-01T22:48:53,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-01T22:48:53,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/nfs.dump.dir in system properties and HBase conf 2024-12-01T22:48:53,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/java.io.tmpdir in system properties and HBase conf 2024-12-01T22:48:53,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-01T22:48:53,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-01T22:48:53,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-01T22:48:53,876 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T22:48:53,889 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T22:48:53,901 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T22:48:53,901 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T22:48:53,901 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-01T22:48:53,902 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T22:48:53,902 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56aa9d3b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/hadoop.log.dir/,AVAILABLE} 2024-12-01T22:48:53,903 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18f854cf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T22:48:54,034 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7f04037c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/java.io.tmpdir/jetty-localhost-38855-hadoop-hdfs-3_4_1-tests_jar-_-any-13091725988420186688/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-01T22:48:54,036 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11bed883{HTTP/1.1, (http/1.1)}{localhost:38855} 2024-12-01T22:48:54,037 INFO [Time-limited test {}] server.Server(415): Started @11996ms 2024-12-01T22:48:54,314 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T22:48:54,318 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T22:48:54,319 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T22:48:54,319 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T22:48:54,319 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-01T22:48:54,319 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61a92fea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/hadoop.log.dir/,AVAILABLE} 2024-12-01T22:48:54,320 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@137179d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T22:48:54,420 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@582dea15{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/java.io.tmpdir/jetty-localhost-33899-hadoop-hdfs-3_4_1-tests_jar-_-any-16664275648421691426/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T22:48:54,420 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@5f6d3ff7{HTTP/1.1, (http/1.1)}{localhost:33899} 2024-12-01T22:48:54,421 INFO [Time-limited test {}] server.Server(415): Started @12380ms 2024-12-01T22:48:54,422 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-01T22:48:54,462 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T22:48:54,466 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T22:48:54,467 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T22:48:54,467 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T22:48:54,467 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-01T22:48:54,467 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@bb1336{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/hadoop.log.dir/,AVAILABLE} 2024-12-01T22:48:54,468 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c597470{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T22:48:54,568 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4b340784{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/java.io.tmpdir/jetty-localhost-41209-hadoop-hdfs-3_4_1-tests_jar-_-any-13184014966985731108/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T22:48:54,568 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1872922a{HTTP/1.1, (http/1.1)}{localhost:41209} 2024-12-01T22:48:54,568 INFO [Time-limited test {}] server.Server(415): Started @12528ms 2024-12-01T22:48:54,570 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-01T22:48:54,610 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T22:48:54,614 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T22:48:54,615 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T22:48:54,615 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T22:48:54,615 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-01T22:48:54,616 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@38da8210{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/hadoop.log.dir/,AVAILABLE} 2024-12-01T22:48:54,616 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@73f6422f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T22:48:54,720 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1183a3bb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/java.io.tmpdir/jetty-localhost-34967-hadoop-hdfs-3_4_1-tests_jar-_-any-1097923097138594941/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T22:48:54,720 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6ee4ec12{HTTP/1.1, (http/1.1)}{localhost:34967} 2024-12-01T22:48:54,720 INFO [Time-limited test {}] server.Server(415): Started @12680ms 2024-12-01T22:48:54,723 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-01T22:48:55,508 WARN [Thread-565 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/cluster_2b2c1e36-2c86-8310-2570-f88700f7ce62/data/data2/current/BP-1604229375-172.17.0.2-1733093333546/current, will proceed with Du for space computation calculation, 2024-12-01T22:48:55,508 WARN [Thread-564 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/cluster_2b2c1e36-2c86-8310-2570-f88700f7ce62/data/data1/current/BP-1604229375-172.17.0.2-1733093333546/current, will proceed with Du for space computation calculation, 2024-12-01T22:48:55,531 WARN [Thread-505 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-01T22:48:55,534 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x24a8716b2fd2b846 with lease ID 0xd4a2285d27547b1: Processing first storage report for DS-e98cccfb-5212-4482-b742-89f4465b9338 from datanode DatanodeRegistration(127.0.0.1:41185, datanodeUuid=6c5f984b-6f03-4cf8-8c5c-7929410ba23f, infoPort=43707, infoSecurePort=0, ipcPort=36041, storageInfo=lv=-57;cid=testClusterID;nsid=960440784;c=1733093333546) 2024-12-01T22:48:55,534 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x24a8716b2fd2b846 with lease ID 0xd4a2285d27547b1: from storage DS-e98cccfb-5212-4482-b742-89f4465b9338 node DatanodeRegistration(127.0.0.1:41185, datanodeUuid=6c5f984b-6f03-4cf8-8c5c-7929410ba23f, infoPort=43707, infoSecurePort=0, ipcPort=36041, storageInfo=lv=-57;cid=testClusterID;nsid=960440784;c=1733093333546), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T22:48:55,534 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x24a8716b2fd2b846 with lease ID 0xd4a2285d27547b1: Processing first storage report for DS-fdb594a5-31cf-4b4b-a6cd-30e8f76dad92 from datanode DatanodeRegistration(127.0.0.1:41185, datanodeUuid=6c5f984b-6f03-4cf8-8c5c-7929410ba23f, infoPort=43707, infoSecurePort=0, ipcPort=36041, storageInfo=lv=-57;cid=testClusterID;nsid=960440784;c=1733093333546) 2024-12-01T22:48:55,534 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x24a8716b2fd2b846 with lease ID 0xd4a2285d27547b1: from storage DS-fdb594a5-31cf-4b4b-a6cd-30e8f76dad92 node DatanodeRegistration(127.0.0.1:41185, datanodeUuid=6c5f984b-6f03-4cf8-8c5c-7929410ba23f, infoPort=43707, infoSecurePort=0, ipcPort=36041, storageInfo=lv=-57;cid=testClusterID;nsid=960440784;c=1733093333546), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T22:48:55,925 WARN [Thread-576 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/cluster_2b2c1e36-2c86-8310-2570-f88700f7ce62/data/data3/current/BP-1604229375-172.17.0.2-1733093333546/current, will proceed with Du for space computation calculation, 2024-12-01T22:48:55,925 WARN [Thread-577 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/cluster_2b2c1e36-2c86-8310-2570-f88700f7ce62/data/data4/current/BP-1604229375-172.17.0.2-1733093333546/current, will proceed with Du for space computation calculation, 2024-12-01T22:48:55,946 WARN [Thread-528 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-01T22:48:55,950 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc038174af572188c with lease ID 0xd4a2285d27547b2: Processing first storage report for DS-67434c34-1375-477c-96b2-9ea167b47afb from datanode DatanodeRegistration(127.0.0.1:42687, datanodeUuid=1c59fa26-629c-46ae-b078-d80f2b310286, infoPort=33311, infoSecurePort=0, ipcPort=37703, storageInfo=lv=-57;cid=testClusterID;nsid=960440784;c=1733093333546) 2024-12-01T22:48:55,950 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc038174af572188c with lease ID 0xd4a2285d27547b2: from storage DS-67434c34-1375-477c-96b2-9ea167b47afb node DatanodeRegistration(127.0.0.1:42687, datanodeUuid=1c59fa26-629c-46ae-b078-d80f2b310286, infoPort=33311, infoSecurePort=0, ipcPort=37703, storageInfo=lv=-57;cid=testClusterID;nsid=960440784;c=1733093333546), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T22:48:55,950 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc038174af572188c with lease ID 0xd4a2285d27547b2: Processing first storage report for DS-605d336d-fe96-47e9-8368-f2d0d1b74888 from datanode DatanodeRegistration(127.0.0.1:42687, datanodeUuid=1c59fa26-629c-46ae-b078-d80f2b310286, infoPort=33311, infoSecurePort=0, ipcPort=37703, storageInfo=lv=-57;cid=testClusterID;nsid=960440784;c=1733093333546) 2024-12-01T22:48:55,950 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc038174af572188c with lease ID 0xd4a2285d27547b2: from storage DS-605d336d-fe96-47e9-8368-f2d0d1b74888 node DatanodeRegistration(127.0.0.1:42687, datanodeUuid=1c59fa26-629c-46ae-b078-d80f2b310286, infoPort=33311, infoSecurePort=0, ipcPort=37703, storageInfo=lv=-57;cid=testClusterID;nsid=960440784;c=1733093333546), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T22:48:56,455 WARN [Thread-587 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/cluster_2b2c1e36-2c86-8310-2570-f88700f7ce62/data/data5/current/BP-1604229375-172.17.0.2-1733093333546/current, will proceed with Du for space computation calculation, 2024-12-01T22:48:56,455 WARN [Thread-588 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/cluster_2b2c1e36-2c86-8310-2570-f88700f7ce62/data/data6/current/BP-1604229375-172.17.0.2-1733093333546/current, will proceed with Du for space computation calculation, 2024-12-01T22:48:56,480 WARN [Thread-550 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-01T22:48:56,483 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfc31600e1e99501 with lease ID 0xd4a2285d27547b3: Processing first storage report for DS-4775ac2b-6fb2-4796-b337-fd51d0a55b53 from datanode DatanodeRegistration(127.0.0.1:35317, datanodeUuid=d5e32e25-5c39-4fda-8105-ef5d97ca22cb, infoPort=35975, infoSecurePort=0, ipcPort=35911, storageInfo=lv=-57;cid=testClusterID;nsid=960440784;c=1733093333546) 2024-12-01T22:48:56,483 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfc31600e1e99501 with lease ID 0xd4a2285d27547b3: from storage DS-4775ac2b-6fb2-4796-b337-fd51d0a55b53 node DatanodeRegistration(127.0.0.1:35317, datanodeUuid=d5e32e25-5c39-4fda-8105-ef5d97ca22cb, infoPort=35975, infoSecurePort=0, ipcPort=35911, storageInfo=lv=-57;cid=testClusterID;nsid=960440784;c=1733093333546), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-01T22:48:56,483 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfc31600e1e99501 with lease ID 0xd4a2285d27547b3: Processing first storage report for DS-7f84b805-01c8-4c8a-901a-1963e08202dc from datanode DatanodeRegistration(127.0.0.1:35317, datanodeUuid=d5e32e25-5c39-4fda-8105-ef5d97ca22cb, infoPort=35975, infoSecurePort=0, ipcPort=35911, storageInfo=lv=-57;cid=testClusterID;nsid=960440784;c=1733093333546) 2024-12-01T22:48:56,483 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfc31600e1e99501 with lease ID 0xd4a2285d27547b3: from storage DS-7f84b805-01c8-4c8a-901a-1963e08202dc node DatanodeRegistration(127.0.0.1:35317, datanodeUuid=d5e32e25-5c39-4fda-8105-ef5d97ca22cb, infoPort=35975, infoSecurePort=0, ipcPort=35911, storageInfo=lv=-57;cid=testClusterID;nsid=960440784;c=1733093333546), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T22:48:56,499 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-01T22:48:56,546 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T22:48:56,547 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T22:48:56,575 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864 2024-12-01T22:48:56,578 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/cluster_2b2c1e36-2c86-8310-2570-f88700f7ce62/zookeeper_0, clientPort=55536, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/cluster_2b2c1e36-2c86-8310-2570-f88700f7ce62/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/cluster_2b2c1e36-2c86-8310-2570-f88700f7ce62/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-01T22:48:56,579 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55536 2024-12-01T22:48:56,579 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T22:48:56,581 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T22:48:56,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42687 is added to blk_1073741825_1001 (size=7) 2024-12-01T22:48:56,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41185 is added to blk_1073741825_1001 (size=7) 2024-12-01T22:48:56,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741825_1001 (size=7) 2024-12-01T22:48:56,597 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599 with version=8 2024-12-01T22:48:56,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40035/user/jenkins/test-data/a51c2b30-0b1b-e76c-fda8-1e0734c789a0/hbase-staging 2024-12-01T22:48:56,599 INFO [Time-limited test {}] client.ConnectionUtils(128): master/14efca635be3:0 server-side Connection retries=45 2024-12-01T22:48:56,599 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T22:48:56,599 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T22:48:56,599 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T22:48:56,599 INFO 
[Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T22:48:56,599 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T22:48:56,599 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-01T22:48:56,599 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T22:48:56,600 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37735 2024-12-01T22:48:56,601 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37735 connecting to ZooKeeper ensemble=127.0.0.1:55536 2024-12-01T22:48:56,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:377350x0, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T22:48:56,662 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37735-0x1019315a1ac0000 connected 2024-12-01T22:48:56,747 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T22:48:56,748 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T22:48:56,751 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T22:48:56,751 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599, hbase.cluster.distributed=false 2024-12-01T22:48:56,753 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T22:48:56,753 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37735 2024-12-01T22:48:56,753 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37735 2024-12-01T22:48:56,754 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37735 2024-12-01T22:48:56,754 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37735 2024-12-01T22:48:56,754 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37735 2024-12-01T22:48:56,769 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/14efca635be3:0 server-side Connection retries=45 
2024-12-01T22:48:56,770 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T22:48:56,770 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T22:48:56,770 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T22:48:56,770 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T22:48:56,770 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T22:48:56,770 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T22:48:56,770 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T22:48:56,771 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41365 2024-12-01T22:48:56,773 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41365 connecting to ZooKeeper ensemble=127.0.0.1:55536 2024-12-01T22:48:56,774 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T22:48:56,778 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T22:48:56,788 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:413650x0, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T22:48:56,789 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:413650x0, quorum=127.0.0.1:55536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T22:48:56,789 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41365-0x1019315a1ac0001 connected 2024-12-01T22:48:56,790 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-01T22:48:56,790 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-01T22:48:56,791 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41365-0x1019315a1ac0001, quorum=127.0.0.1:55536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T22:48:56,792 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41365-0x1019315a1ac0001, quorum=127.0.0.1:55536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T22:48:56,793 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41365 2024-12-01T22:48:56,796 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41365 2024-12-01T22:48:56,796 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41365 2024-12-01T22:48:56,797 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41365 2024-12-01T22:48:56,797 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41365 2024-12-01T22:48:56,814 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/14efca635be3:0 server-side Connection retries=45 2024-12-01T22:48:56,814 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T22:48:56,814 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T22:48:56,814 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T22:48:56,814 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T22:48:56,814 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T22:48:56,814 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T22:48:56,815 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T22:48:56,815 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39869 2024-12-01T22:48:56,817 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39869 connecting to ZooKeeper ensemble=127.0.0.1:55536 2024-12-01T22:48:56,817 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T22:48:56,819 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T22:48:56,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:398690x0, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T22:48:56,831 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39869-0x1019315a1ac0002, quorum=127.0.0.1:55536, baseZNode=/hbase Set watcher on znode that does not yet 
exist, /hbase/running 2024-12-01T22:48:56,831 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39869-0x1019315a1ac0002 connected 2024-12-01T22:48:56,831 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-01T22:48:56,832 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-01T22:48:56,833 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39869-0x1019315a1ac0002, quorum=127.0.0.1:55536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T22:48:56,834 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39869-0x1019315a1ac0002, quorum=127.0.0.1:55536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T22:48:56,835 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39869 2024-12-01T22:48:56,837 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39869 2024-12-01T22:48:56,839 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39869 2024-12-01T22:48:56,840 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39869 2024-12-01T22:48:56,840 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39869 2024-12-01T22:48:56,859 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/14efca635be3:0 server-side Connection retries=45 2024-12-01T22:48:56,859 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T22:48:56,859 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T22:48:56,859 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T22:48:56,859 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T22:48:56,859 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T22:48:56,859 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T22:48:56,860 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T22:48:56,860 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38085 2024-12-01T22:48:56,862 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process 
identifier=regionserver:38085 connecting to ZooKeeper ensemble=127.0.0.1:55536 2024-12-01T22:48:56,863 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T22:48:56,865 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T22:48:56,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:380850x0, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T22:48:56,873 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:380850x0, quorum=127.0.0.1:55536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T22:48:56,873 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38085-0x1019315a1ac0003 connected 2024-12-01T22:48:56,873 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-01T22:48:56,874 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-01T22:48:56,874 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38085-0x1019315a1ac0003, quorum=127.0.0.1:55536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T22:48:56,875 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38085-0x1019315a1ac0003, quorum=127.0.0.1:55536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T22:48:56,876 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38085 2024-12-01T22:48:56,878 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38085 2024-12-01T22:48:56,878 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38085 2024-12-01T22:48:56,878 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38085 2024-12-01T22:48:56,878 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38085 2024-12-01T22:48:56,889 DEBUG [M:0;14efca635be3:37735 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;14efca635be3:37735 2024-12-01T22:48:56,889 INFO [master/14efca635be3:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/14efca635be3,37735,1733093336598 2024-12-01T22:48:56,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39869-0x1019315a1ac0002, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T22:48:56,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T22:48:56,893 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38085-0x1019315a1ac0003, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T22:48:56,894 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41365-0x1019315a1ac0001, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T22:48:56,894 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/14efca635be3,37735,1733093336598 2024-12-01T22:48:56,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38085-0x1019315a1ac0003, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T22:48:56,904 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41365-0x1019315a1ac0001, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T22:48:56,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39869-0x1019315a1ac0002, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T22:48:56,904 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41365-0x1019315a1ac0001, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:56,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38085-0x1019315a1ac0003, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:56,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:56,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39869-0x1019315a1ac0002, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:56,907 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-01T22:48:56,907 INFO [master/14efca635be3:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/14efca635be3,37735,1733093336598 from backup master directory 2024-12-01T22:48:56,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38085-0x1019315a1ac0003, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T22:48:56,915 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41365-0x1019315a1ac0001, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T22:48:56,915 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/14efca635be3,37735,1733093336598 2024-12-01T22:48:56,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39869-0x1019315a1ac0002, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T22:48:56,915 WARN [master/14efca635be3:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-01T22:48:56,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T22:48:56,915 INFO [master/14efca635be3:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=14efca635be3,37735,1733093336598 2024-12-01T22:48:56,921 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/hbase.id] with ID: b76492ec-1e47-4d8a-b334-5379c009efee 2024-12-01T22:48:56,921 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/.tmp/hbase.id 2024-12-01T22:48:56,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741826_1002 (size=42) 2024-12-01T22:48:56,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41185 is added to blk_1073741826_1002 (size=42) 2024-12-01T22:48:56,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42687 is added to blk_1073741826_1002 (size=42) 2024-12-01T22:48:56,931 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/.tmp/hbase.id]:[hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/hbase.id] 2024-12-01T22:48:56,948 INFO [master/14efca635be3:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T22:48:56,948 INFO [master/14efca635be3:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-01T22:48:56,950 INFO [master/14efca635be3:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-12-01T22:48:56,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:56,999 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41365-0x1019315a1ac0001, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:57,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38085-0x1019315a1ac0003, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:57,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39869-0x1019315a1ac0002, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:57,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741827_1003 (size=196) 2024-12-01T22:48:57,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42687 is added to blk_1073741827_1003 (size=196) 2024-12-01T22:48:57,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41185 is added to blk_1073741827_1003 (size=196) 2024-12-01T22:48:57,013 INFO [master/14efca635be3:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T22:48:57,014 INFO [master/14efca635be3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-01T22:48:57,014 INFO [master/14efca635be3:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T22:48:57,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is 
added to blk_1073741828_1004 (size=1189) 2024-12-01T22:48:57,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41185 is added to blk_1073741828_1004 (size=1189) 2024-12-01T22:48:57,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42687 is added to blk_1073741828_1004 (size=1189) 2024-12-01T22:48:57,429 INFO [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/MasterData/data/master/store 2024-12-01T22:48:57,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41185 is added to blk_1073741829_1005 (size=34) 2024-12-01T22:48:57,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741829_1005 (size=34) 2024-12-01T22:48:57,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42687 is added to blk_1073741829_1005 (size=34) 2024-12-01T22:48:57,441 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T22:48:57,441 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-01T22:48:57,441 INFO [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T22:48:57,441 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
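The table descriptor dumped by MasterRegion(370) and HRegion(7590) above — families info, proc, rs and state with VERSIONS, BLOOMFILTER, IN_MEMORY, BLOCKSIZE and DATA_BLOCK_ENCODING attributes — belongs to the master's internal local store, but the same attributes map directly onto HBase's public builder API. A minimal sketch of two of the logged families, assuming a current HBase client on the classpath; the class and table names here are illustrative, not how the master actually constructs its store:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class DescriptorSketch {
      public static TableDescriptor build() {
        // 'info' as logged: VERSIONS=3, BLOOMFILTER=ROWCOL, IN_MEMORY=true,
        // BLOCKSIZE=8 KB, DATA_BLOCK_ENCODING=ROW_INDEX_V1
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .build();
        // 'proc' as logged: VERSIONS=1, BLOOMFILTER=ROW, defaults elsewhere (64 KB blocks, no encoding)
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .build();
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo", "store"))
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .build();
      }
    }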
2024-12-01T22:48:57,441 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-01T22:48:57,441 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T22:48:57,441 INFO [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T22:48:57,441 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733093337441Disabling compacts and flushes for region at 1733093337441Disabling writes for close at 1733093337441Writing region close event to WAL at 1733093337441Closed at 1733093337441 2024-12-01T22:48:57,442 WARN [master/14efca635be3:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/MasterData/data/master/store/.initializing 2024-12-01T22:48:57,442 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/MasterData/WALs/14efca635be3,37735,1733093336598 2024-12-01T22:48:57,446 INFO [master/14efca635be3:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=14efca635be3%2C37735%2C1733093336598, suffix=, logDir=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/MasterData/WALs/14efca635be3,37735,1733093336598, archiveDir=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/MasterData/oldWALs, maxLogs=10 2024-12-01T22:48:57,447 INFO [master/14efca635be3:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 14efca635be3%2C37735%2C1733093336598.1733093337447 2024-12-01T22:48:57,457 INFO [master/14efca635be3:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/MasterData/WALs/14efca635be3,37735,1733093336598/14efca635be3%2C37735%2C1733093336598.1733093337447 2024-12-01T22:48:57,468 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33311:33311),(127.0.0.1/127.0.0.1:43707:43707),(127.0.0.1/127.0.0.1:35975:35975)] 2024-12-01T22:48:57,469 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-01T22:48:57,469 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T22:48:57,469 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T22:48:57,469 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T22:48:57,471 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T22:48:57,473 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-01T22:48:57,473 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T22:48:57,474 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T22:48:57,474 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T22:48:57,475 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-01T22:48:57,475 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T22:48:57,476 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T22:48:57,476 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T22:48:57,478 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-01T22:48:57,479 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T22:48:57,479 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T22:48:57,479 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T22:48:57,481 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-01T22:48:57,481 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T22:48:57,482 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T22:48:57,482 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T22:48:57,483 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-01T22:48:57,483 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-01T22:48:57,485 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T22:48:57,485 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T22:48:57,486 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-01T22:48:57,488 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T22:48:57,491 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T22:48:57,492 INFO [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59071710, jitterRate=-0.11976292729377747}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T22:48:57,492 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733093337469Initializing all the Stores at 1733093337471 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733093337471Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733093337471Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733093337471Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733093337471Cleaning up temporary data from old regions at 1733093337485 (+14 ms)Region opened successfully at 1733093337492 (+7 ms) 2024-12-01T22:48:57,493 INFO [master/14efca635be3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-01T22:48:57,497 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3485c7a9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=14efca635be3/172.17.0.2:0 2024-12-01T22:48:57,498 INFO [master/14efca635be3:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-01T22:48:57,498 INFO [master/14efca635be3:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-01T22:48:57,498 INFO [master/14efca635be3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-01T22:48:57,499 INFO [master/14efca635be3:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-01T22:48:57,499 INFO [master/14efca635be3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-01T22:48:57,500 INFO [master/14efca635be3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-01T22:48:57,500 INFO [master/14efca635be3:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-01T22:48:57,502 INFO [master/14efca635be3:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-01T22:48:57,503 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-01T22:48:57,546 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-01T22:48:57,547 INFO [master/14efca635be3:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-01T22:48:57,547 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-01T22:48:57,557 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-01T22:48:57,557 INFO [master/14efca635be3:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-01T22:48:57,558 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-01T22:48:57,567 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-01T22:48:57,568 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-01T22:48:57,578 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-01T22:48:57,580 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-01T22:48:57,588 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-01T22:48:57,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38085-0x1019315a1ac0003, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T22:48:57,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39869-0x1019315a1ac0002, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T22:48:57,599 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41365-0x1019315a1ac0001, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T22:48:57,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-01T22:48:57,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39869-0x1019315a1ac0002, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:57,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38085-0x1019315a1ac0003, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:57,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:57,599 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41365-0x1019315a1ac0001, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:57,600 INFO [master/14efca635be3:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=14efca635be3,37735,1733093336598, sessionid=0x1019315a1ac0000, setting cluster-up flag (Was=false) 2024-12-01T22:48:57,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:57,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38085-0x1019315a1ac0003, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:57,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39869-0x1019315a1ac0002, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:57,620 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41365-0x1019315a1ac0001, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:57,651 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-01T22:48:57,653 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=14efca635be3,37735,1733093336598 2024-12-01T22:48:57,672 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41365-0x1019315a1ac0001, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:57,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:57,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38085-0x1019315a1ac0003, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:57,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:39869-0x1019315a1ac0002, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:57,704 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-01T22:48:57,706 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=14efca635be3,37735,1733093336598 2024-12-01T22:48:57,707 INFO [master/14efca635be3:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-01T22:48:57,710 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-01T22:48:57,710 INFO [master/14efca635be3:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-01T22:48:57,710 INFO [master/14efca635be3:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
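The ZKUtil(444) and RecoverableZooKeeper(212) entries a little above treat a missing znode (/hbase/balancer, /hbase/normalizer, the switch and snapshot-cleanup nodes) as an expected state rather than a failure — "not necessarily an error". A minimal sketch of that pattern with the plain ZooKeeper client; the helper name is invented for illustration:

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooKeeper;

    public final class OptionalZnode {
      /** Returns the znode's data, or null when the node simply does not exist. */
      public static byte[] read(ZooKeeper zk, String path)
          throws KeeperException, InterruptedException {
        try {
          return zk.getData(path, false, null);   // no watch, Stat not needed
        } catch (KeeperException.NoNodeException e) {
          return null;                            // absent node is a normal outcome here
        }
      }
    }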
2024-12-01T22:48:57,710 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 14efca635be3,37735,1733093336598 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-01T22:48:57,712 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/14efca635be3:0, corePoolSize=5, maxPoolSize=5 2024-12-01T22:48:57,712 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/14efca635be3:0, corePoolSize=5, maxPoolSize=5 2024-12-01T22:48:57,712 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/14efca635be3:0, corePoolSize=5, maxPoolSize=5 2024-12-01T22:48:57,712 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/14efca635be3:0, corePoolSize=5, maxPoolSize=5 2024-12-01T22:48:57,712 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/14efca635be3:0, corePoolSize=10, maxPoolSize=10 2024-12-01T22:48:57,712 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,712 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/14efca635be3:0, corePoolSize=2, maxPoolSize=2 2024-12-01T22:48:57,712 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,714 INFO [master/14efca635be3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733093367714 2024-12-01T22:48:57,715 INFO [master/14efca635be3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-01T22:48:57,715 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-01T22:48:57,715 INFO [master/14efca635be3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-01T22:48:57,715 INFO [master/14efca635be3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-01T22:48:57,715 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-01T22:48:57,715 INFO [master/14efca635be3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-01T22:48:57,715 INFO [master/14efca635be3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-01T22:48:57,715 INFO [master/14efca635be3:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-01T22:48:57,716 INFO [master/14efca635be3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,717 INFO [master/14efca635be3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-01T22:48:57,717 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T22:48:57,717 INFO [master/14efca635be3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-01T22:48:57,717 INFO [master/14efca635be3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-01T22:48:57,717 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-01T22:48:57,717 INFO [master/14efca635be3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-01T22:48:57,717 INFO [master/14efca635be3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-01T22:48:57,718 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/14efca635be3:0:becomeActiveMaster-HFileCleaner.large.0-1733093337717,5,FailOnTimeoutGroup] 2024-12-01T22:48:57,718 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/14efca635be3:0:becomeActiveMaster-HFileCleaner.small.0-1733093337718,5,FailOnTimeoutGroup] 2024-12-01T22:48:57,718 INFO [master/14efca635be3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,718 INFO [master/14efca635be3:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-01T22:48:57,718 INFO [master/14efca635be3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,718 INFO [master/14efca635be3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41185 is added to blk_1073741831_1007 (size=1321) 2024-12-01T22:48:57,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741831_1007 (size=1321) 2024-12-01T22:48:57,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42687 is added to blk_1073741831_1007 (size=1321) 2024-12-01T22:48:57,733 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-01T22:48:57,733 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599 2024-12-01T22:48:57,749 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42687 is added to blk_1073741832_1008 (size=32) 2024-12-01T22:48:57,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741832_1008 (size=32) 2024-12-01T22:48:57,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41185 is added to blk_1073741832_1008 (size=32) 2024-12-01T22:48:57,751 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T22:48:57,753 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-01T22:48:57,755 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-01T22:48:57,755 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T22:48:57,756 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T22:48:57,756 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-01T22:48:57,758 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-01T22:48:57,758 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
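Each CompactionConfiguration(183) line above prints the effective compaction settings for one column family (3 to 10 files per compaction, ratio 1.2, 128 MB minimum compact size, and so on). These values are driven by standard configuration keys; the sketch below uses the key names as documented for recent HBase releases, so verify them against the version in use before relying on them — the values shown simply restate the defaults visible in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class CompactionTuningSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);          // minFilesToCompact in the log
        conf.setInt("hbase.hstore.compaction.max", 10);         // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);   // ratio 1.200000
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize 128 MB
        return conf;
      }
    }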
2024-12-01T22:48:57,758 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T22:48:57,758 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-01T22:48:57,760 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-01T22:48:57,760 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T22:48:57,761 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T22:48:57,761 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-01T22:48:57,762 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-01T22:48:57,763 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T22:48:57,763 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T22:48:57,764 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-01T22:48:57,765 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/hbase/meta/1588230740 2024-12-01T22:48:57,765 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/hbase/meta/1588230740 2024-12-01T22:48:57,767 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-01T22:48:57,767 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-01T22:48:57,768 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-01T22:48:57,769 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-01T22:48:57,772 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T22:48:57,773 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65303913, jitterRate=-0.02689586579799652}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T22:48:57,774 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733093337751Initializing all the Stores at 1733093337752 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733093337752Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733093337753 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733093337753Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733093337753Cleaning up temporary data from old regions at 1733093337767 (+14 ms)Region opened successfully at 1733093337774 (+7 ms) 2024-12-01T22:48:57,774 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 
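The FlushLargeStoresPolicy(65) entries (here for hbase:meta, earlier for master:store) fall back to memstore-flush-size divided by the number of families because the key they name is not present in the table descriptor. A sketch of supplying it explicitly through the descriptor builder; the key name is taken verbatim from the log message, while the table name and the 16 MB value are illustrative only:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public final class FlushBoundSketch {
      public static TableDescriptor withExplicitLowerBound() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024))
            .build();
      }
    }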
2024-12-01T22:48:57,774 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-01T22:48:57,774 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-01T22:48:57,774 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T22:48:57,775 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-01T22:48:57,777 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-01T22:48:57,777 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733093337774Disabling compacts and flushes for region at 1733093337774Disabling writes for close at 1733093337774Writing region close event to WAL at 1733093337777 (+3 ms)Closed at 1733093337777 2024-12-01T22:48:57,779 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-01T22:48:57,779 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-01T22:48:57,780 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-01T22:48:57,781 INFO [RS:2;14efca635be3:38085 {}] regionserver.HRegionServer(746): ClusterId : b76492ec-1e47-4d8a-b334-5379c009efee 2024-12-01T22:48:57,781 DEBUG [RS:2;14efca635be3:38085 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-01T22:48:57,781 INFO [RS:0;14efca635be3:41365 {}] regionserver.HRegionServer(746): ClusterId : b76492ec-1e47-4d8a-b334-5379c009efee 2024-12-01T22:48:57,781 INFO [RS:1;14efca635be3:39869 {}] regionserver.HRegionServer(746): ClusterId : b76492ec-1e47-4d8a-b334-5379c009efee 2024-12-01T22:48:57,781 DEBUG [RS:0;14efca635be3:41365 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-01T22:48:57,781 DEBUG [RS:1;14efca635be3:39869 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-01T22:48:57,782 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-01T22:48:57,784 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-01T22:48:57,799 DEBUG [RS:1;14efca635be3:39869 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-01T22:48:57,799 DEBUG [RS:2;14efca635be3:38085 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-01T22:48:57,799 DEBUG [RS:1;14efca635be3:39869 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-01T22:48:57,799 DEBUG [RS:2;14efca635be3:38085 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 
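The open and close journals above record plain epoch milliseconds (for example "Closed at 1733093337777"), which are the same instants as the human-readable timestamps at the start of each entry; this run logs in UTC. A quick way to cross-check one of them:

    import java.time.Instant;

    public final class JournalTimestamp {
      public static void main(String[] args) {
        // 1733093337777 ms is the "Closed at 1733093337777" value from the close journal above
        System.out.println(Instant.ofEpochMilli(1733093337777L));
        // prints 2024-12-01T22:48:57.777Z, matching the surrounding log lines
      }
    }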
2024-12-01T22:48:57,800 DEBUG [RS:0;14efca635be3:41365 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-01T22:48:57,800 DEBUG [RS:0;14efca635be3:41365 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-01T22:48:57,810 DEBUG [RS:0;14efca635be3:41365 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T22:48:57,810 DEBUG [RS:1;14efca635be3:39869 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T22:48:57,810 DEBUG [RS:2;14efca635be3:38085 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T22:48:57,810 DEBUG [RS:0;14efca635be3:41365 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1080fefb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=14efca635be3/172.17.0.2:0 2024-12-01T22:48:57,810 DEBUG [RS:2;14efca635be3:38085 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1195e2e2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=14efca635be3/172.17.0.2:0 2024-12-01T22:48:57,810 DEBUG [RS:1;14efca635be3:39869 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@169d1d82, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=14efca635be3/172.17.0.2:0 2024-12-01T22:48:57,820 DEBUG [RS:0;14efca635be3:41365 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;14efca635be3:41365 2024-12-01T22:48:57,820 INFO [RS:0;14efca635be3:41365 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-01T22:48:57,821 INFO [RS:0;14efca635be3:41365 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-01T22:48:57,821 DEBUG [RS:1;14efca635be3:39869 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;14efca635be3:39869 2024-12-01T22:48:57,821 DEBUG [RS:0;14efca635be3:41365 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-01T22:48:57,821 INFO [RS:1;14efca635be3:39869 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-01T22:48:57,821 INFO [RS:1;14efca635be3:39869 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-01T22:48:57,821 DEBUG [RS:1;14efca635be3:39869 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-01T22:48:57,822 INFO [RS:1;14efca635be3:39869 {}] regionserver.HRegionServer(2659): reportForDuty to master=14efca635be3,37735,1733093336598 with port=39869, startcode=1733093336813 2024-12-01T22:48:57,822 INFO [RS:0;14efca635be3:41365 {}] regionserver.HRegionServer(2659): reportForDuty to master=14efca635be3,37735,1733093336598 with port=41365, startcode=1733093336769 2024-12-01T22:48:57,822 DEBUG [RS:1;14efca635be3:39869 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T22:48:57,822 DEBUG [RS:0;14efca635be3:41365 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T22:48:57,822 DEBUG [RS:2;14efca635be3:38085 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;14efca635be3:38085 2024-12-01T22:48:57,822 INFO [RS:2;14efca635be3:38085 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-01T22:48:57,822 INFO [RS:2;14efca635be3:38085 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-01T22:48:57,823 DEBUG [RS:2;14efca635be3:38085 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-01T22:48:57,823 INFO [RS:2;14efca635be3:38085 {}] regionserver.HRegionServer(2659): reportForDuty to master=14efca635be3,37735,1733093336598 with port=38085, startcode=1733093336858 2024-12-01T22:48:57,824 DEBUG [RS:2;14efca635be3:38085 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T22:48:57,824 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47021, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T22:48:57,825 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37735 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 14efca635be3,39869,1733093336813 2024-12-01T22:48:57,825 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37735 {}] master.ServerManager(517): Registering regionserver=14efca635be3,39869,1733093336813 2024-12-01T22:48:57,826 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57171, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T22:48:57,826 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39433, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T22:48:57,827 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37735 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 14efca635be3,41365,1733093336769 2024-12-01T22:48:57,827 DEBUG [RS:1;14efca635be3:39869 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599 2024-12-01T22:48:57,827 DEBUG [RS:1;14efca635be3:39869 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44601 2024-12-01T22:48:57,827 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37735 {}] master.ServerManager(517): Registering regionserver=14efca635be3,41365,1733093336769 2024-12-01T22:48:57,827 DEBUG [RS:1;14efca635be3:39869 {}] regionserver.HRegionServer(1440): 
Config from master: hbase.master.info.port=-1 2024-12-01T22:48:57,829 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37735 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 14efca635be3,38085,1733093336858 2024-12-01T22:48:57,829 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37735 {}] master.ServerManager(517): Registering regionserver=14efca635be3,38085,1733093336858 2024-12-01T22:48:57,829 DEBUG [RS:0;14efca635be3:41365 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599 2024-12-01T22:48:57,830 DEBUG [RS:0;14efca635be3:41365 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44601 2024-12-01T22:48:57,830 DEBUG [RS:0;14efca635be3:41365 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-01T22:48:57,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T22:48:57,832 DEBUG [RS:2;14efca635be3:38085 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599 2024-12-01T22:48:57,832 DEBUG [RS:2;14efca635be3:38085 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44601 2024-12-01T22:48:57,832 DEBUG [RS:2;14efca635be3:38085 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-01T22:48:57,868 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [14efca635be3,41365,1733093336769] 2024-12-01T22:48:57,868 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [14efca635be3,39869,1733093336813] 2024-12-01T22:48:57,869 DEBUG [RS:1;14efca635be3:39869 {}] zookeeper.ZKUtil(111): regionserver:39869-0x1019315a1ac0002, quorum=127.0.0.1:55536, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/14efca635be3,39869,1733093336813 2024-12-01T22:48:57,869 WARN [RS:1;14efca635be3:39869 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-01T22:48:57,869 INFO [RS:1;14efca635be3:39869 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T22:48:57,869 DEBUG [RS:1;14efca635be3:39869 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/WALs/14efca635be3,39869,1733093336813 2024-12-01T22:48:57,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T22:48:57,883 DEBUG [RS:0;14efca635be3:41365 {}] zookeeper.ZKUtil(111): regionserver:41365-0x1019315a1ac0001, quorum=127.0.0.1:55536, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/14efca635be3,41365,1733093336769 2024-12-01T22:48:57,883 WARN [RS:0;14efca635be3:41365 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-01T22:48:57,883 INFO [RS:0;14efca635be3:41365 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T22:48:57,883 DEBUG [RS:0;14efca635be3:41365 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/WALs/14efca635be3,41365,1733093336769 2024-12-01T22:48:57,884 DEBUG [RS:2;14efca635be3:38085 {}] zookeeper.ZKUtil(111): regionserver:38085-0x1019315a1ac0003, quorum=127.0.0.1:55536, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/14efca635be3,38085,1733093336858 2024-12-01T22:48:57,884 WARN [RS:2;14efca635be3:38085 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-01T22:48:57,884 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [14efca635be3,38085,1733093336858] 2024-12-01T22:48:57,884 INFO [RS:2;14efca635be3:38085 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T22:48:57,884 DEBUG [RS:2;14efca635be3:38085 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/WALs/14efca635be3,38085,1733093336858 2024-12-01T22:48:57,884 INFO [RS:1;14efca635be3:39869 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T22:48:57,887 INFO [RS:2;14efca635be3:38085 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T22:48:57,887 INFO [RS:0;14efca635be3:41365 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T22:48:57,888 INFO [RS:1;14efca635be3:39869 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T22:48:57,892 INFO [RS:1;14efca635be3:39869 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T22:48:57,892 INFO [RS:2;14efca635be3:38085 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T22:48:57,892 INFO [RS:1;14efca635be3:39869 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,898 INFO [RS:0;14efca635be3:41365 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T22:48:57,899 INFO [RS:1;14efca635be3:39869 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-01T22:48:57,899 INFO [RS:2;14efca635be3:38085 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T22:48:57,899 INFO [RS:2;14efca635be3:38085 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-01T22:48:57,900 INFO [RS:2;14efca635be3:38085 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-01T22:48:57,900 INFO [RS:0;14efca635be3:41365 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T22:48:57,900 INFO [RS:0;14efca635be3:41365 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,900 INFO [RS:0;14efca635be3:41365 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-01T22:48:57,900 INFO [RS:1;14efca635be3:39869 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-01T22:48:57,900 INFO [RS:1;14efca635be3:39869 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,900 DEBUG [RS:1;14efca635be3:39869 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,900 DEBUG [RS:1;14efca635be3:39869 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,901 DEBUG [RS:1;14efca635be3:39869 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,901 DEBUG [RS:1;14efca635be3:39869 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,901 DEBUG [RS:1;14efca635be3:39869 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,901 DEBUG [RS:1;14efca635be3:39869 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/14efca635be3:0, corePoolSize=2, maxPoolSize=2 2024-12-01T22:48:57,901 DEBUG [RS:1;14efca635be3:39869 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,901 DEBUG [RS:1;14efca635be3:39869 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,901 DEBUG [RS:1;14efca635be3:39869 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,901 DEBUG [RS:1;14efca635be3:39869 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,901 DEBUG [RS:1;14efca635be3:39869 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,901 DEBUG [RS:1;14efca635be3:39869 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,901 DEBUG [RS:1;14efca635be3:39869 {}] executor.ExecutorService(95): Starting executor 
service name=RS_SNAPSHOT_OPERATIONS-regionserver/14efca635be3:0, corePoolSize=3, maxPoolSize=3 2024-12-01T22:48:57,902 DEBUG [RS:1;14efca635be3:39869 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0, corePoolSize=3, maxPoolSize=3 2024-12-01T22:48:57,902 INFO [RS:0;14efca635be3:41365 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-01T22:48:57,902 INFO [RS:2;14efca635be3:38085 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-01T22:48:57,903 INFO [RS:0;14efca635be3:41365 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,903 INFO [RS:2;14efca635be3:38085 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,903 INFO [RS:1;14efca635be3:39869 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,903 DEBUG [RS:2;14efca635be3:38085 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,903 DEBUG [RS:0;14efca635be3:41365 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,903 INFO [RS:1;14efca635be3:39869 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,903 DEBUG [RS:2;14efca635be3:38085 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,903 INFO [RS:1;14efca635be3:39869 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,903 DEBUG [RS:0;14efca635be3:41365 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,903 DEBUG [RS:2;14efca635be3:38085 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,903 INFO [RS:1;14efca635be3:39869 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,903 DEBUG [RS:0;14efca635be3:41365 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,903 DEBUG [RS:2;14efca635be3:38085 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,903 INFO [RS:1;14efca635be3:39869 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,903 DEBUG [RS:0;14efca635be3:41365 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,903 INFO [RS:1;14efca635be3:39869 {}] hbase.ChoreService(168): Chore ScheduledChore name=14efca635be3,39869,1733093336813-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-01T22:48:57,903 DEBUG [RS:2;14efca635be3:38085 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,903 DEBUG [RS:0;14efca635be3:41365 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,903 DEBUG [RS:2;14efca635be3:38085 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/14efca635be3:0, corePoolSize=2, maxPoolSize=2 2024-12-01T22:48:57,903 DEBUG [RS:0;14efca635be3:41365 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/14efca635be3:0, corePoolSize=2, maxPoolSize=2 2024-12-01T22:48:57,903 DEBUG [RS:2;14efca635be3:38085 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,903 DEBUG [RS:0;14efca635be3:41365 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,903 DEBUG [RS:2;14efca635be3:38085 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,903 DEBUG [RS:0;14efca635be3:41365 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,903 DEBUG [RS:2;14efca635be3:38085 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,903 DEBUG [RS:0;14efca635be3:41365 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,904 DEBUG [RS:2;14efca635be3:38085 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,904 DEBUG [RS:0;14efca635be3:41365 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,904 DEBUG [RS:2;14efca635be3:38085 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,904 DEBUG [RS:0;14efca635be3:41365 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,904 DEBUG [RS:2;14efca635be3:38085 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,904 DEBUG [RS:0;14efca635be3:41365 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/14efca635be3:0, corePoolSize=1, maxPoolSize=1 2024-12-01T22:48:57,904 DEBUG [RS:2;14efca635be3:38085 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/14efca635be3:0, corePoolSize=3, maxPoolSize=3 2024-12-01T22:48:57,904 DEBUG [RS:0;14efca635be3:41365 {}] executor.ExecutorService(95): Starting executor service 
name=RS_SNAPSHOT_OPERATIONS-regionserver/14efca635be3:0, corePoolSize=3, maxPoolSize=3 2024-12-01T22:48:57,904 DEBUG [RS:2;14efca635be3:38085 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0, corePoolSize=3, maxPoolSize=3 2024-12-01T22:48:57,904 DEBUG [RS:0;14efca635be3:41365 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0, corePoolSize=3, maxPoolSize=3 2024-12-01T22:48:57,908 INFO [RS:2;14efca635be3:38085 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,908 INFO [RS:0;14efca635be3:41365 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,908 INFO [RS:2;14efca635be3:38085 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,908 INFO [RS:2;14efca635be3:38085 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,908 INFO [RS:0;14efca635be3:41365 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,908 INFO [RS:2;14efca635be3:38085 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,908 INFO [RS:0;14efca635be3:41365 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,908 INFO [RS:2;14efca635be3:38085 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,908 INFO [RS:0;14efca635be3:41365 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,908 INFO [RS:2;14efca635be3:38085 {}] hbase.ChoreService(168): Chore ScheduledChore name=14efca635be3,38085,1733093336858-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T22:48:57,908 INFO [RS:0;14efca635be3:41365 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,908 INFO [RS:0;14efca635be3:41365 {}] hbase.ChoreService(168): Chore ScheduledChore name=14efca635be3,41365,1733093336769-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T22:48:57,916 INFO [RS:1;14efca635be3:39869 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T22:48:57,917 INFO [RS:1;14efca635be3:39869 {}] hbase.ChoreService(168): Chore ScheduledChore name=14efca635be3,39869,1733093336813-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,917 INFO [RS:1;14efca635be3:39869 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,917 INFO [RS:1;14efca635be3:39869 {}] regionserver.Replication(171): 14efca635be3,39869,1733093336813 started 2024-12-01T22:48:57,929 INFO [RS:1;14efca635be3:39869 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-01T22:48:57,929 INFO [RS:1;14efca635be3:39869 {}] regionserver.HRegionServer(1482): Serving as 14efca635be3,39869,1733093336813, RpcServer on 14efca635be3/172.17.0.2:39869, sessionid=0x1019315a1ac0002 2024-12-01T22:48:57,929 INFO [RS:0;14efca635be3:41365 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T22:48:57,929 INFO [RS:2;14efca635be3:38085 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T22:48:57,929 DEBUG [RS:1;14efca635be3:39869 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T22:48:57,929 DEBUG [RS:1;14efca635be3:39869 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 14efca635be3,39869,1733093336813 2024-12-01T22:48:57,929 DEBUG [RS:1;14efca635be3:39869 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '14efca635be3,39869,1733093336813' 2024-12-01T22:48:57,929 INFO [RS:0;14efca635be3:41365 {}] hbase.ChoreService(168): Chore ScheduledChore name=14efca635be3,41365,1733093336769-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,929 INFO [RS:2;14efca635be3:38085 {}] hbase.ChoreService(168): Chore ScheduledChore name=14efca635be3,38085,1733093336858-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,929 DEBUG [RS:1;14efca635be3:39869 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T22:48:57,929 INFO [RS:0;14efca635be3:41365 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,929 INFO [RS:2;14efca635be3:38085 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-01T22:48:57,929 INFO [RS:0;14efca635be3:41365 {}] regionserver.Replication(171): 14efca635be3,41365,1733093336769 started 2024-12-01T22:48:57,929 INFO [RS:2;14efca635be3:38085 {}] regionserver.Replication(171): 14efca635be3,38085,1733093336858 started 2024-12-01T22:48:57,930 DEBUG [RS:1;14efca635be3:39869 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-01T22:48:57,930 DEBUG [RS:1;14efca635be3:39869 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T22:48:57,930 DEBUG [RS:1;14efca635be3:39869 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T22:48:57,930 DEBUG [RS:1;14efca635be3:39869 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 14efca635be3,39869,1733093336813 2024-12-01T22:48:57,930 DEBUG [RS:1;14efca635be3:39869 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '14efca635be3,39869,1733093336813' 2024-12-01T22:48:57,930 DEBUG [RS:1;14efca635be3:39869 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T22:48:57,931 DEBUG [RS:1;14efca635be3:39869 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-01T22:48:57,932 DEBUG [RS:1;14efca635be3:39869 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T22:48:57,932 INFO [RS:1;14efca635be3:39869 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-01T22:48:57,932 INFO [RS:1;14efca635be3:39869 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-01T22:48:57,934 WARN [14efca635be3:37735 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-01T22:48:57,949 INFO [RS:2;14efca635be3:38085 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:57,949 INFO [RS:0;14efca635be3:41365 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-01T22:48:57,950 INFO [RS:0;14efca635be3:41365 {}] regionserver.HRegionServer(1482): Serving as 14efca635be3,41365,1733093336769, RpcServer on 14efca635be3/172.17.0.2:41365, sessionid=0x1019315a1ac0001 2024-12-01T22:48:57,950 INFO [RS:2;14efca635be3:38085 {}] regionserver.HRegionServer(1482): Serving as 14efca635be3,38085,1733093336858, RpcServer on 14efca635be3/172.17.0.2:38085, sessionid=0x1019315a1ac0003 2024-12-01T22:48:57,950 DEBUG [RS:2;14efca635be3:38085 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T22:48:57,950 DEBUG [RS:0;14efca635be3:41365 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T22:48:57,950 DEBUG [RS:0;14efca635be3:41365 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 14efca635be3,41365,1733093336769 2024-12-01T22:48:57,950 DEBUG [RS:2;14efca635be3:38085 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 14efca635be3,38085,1733093336858 2024-12-01T22:48:57,950 DEBUG [RS:2;14efca635be3:38085 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '14efca635be3,38085,1733093336858' 2024-12-01T22:48:57,950 DEBUG [RS:0;14efca635be3:41365 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '14efca635be3,41365,1733093336769' 2024-12-01T22:48:57,950 DEBUG [RS:0;14efca635be3:41365 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T22:48:57,950 DEBUG [RS:2;14efca635be3:38085 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T22:48:57,951 DEBUG [RS:0;14efca635be3:41365 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-01T22:48:57,951 DEBUG [RS:2;14efca635be3:38085 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-01T22:48:57,951 DEBUG [RS:0;14efca635be3:41365 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T22:48:57,951 DEBUG [RS:2;14efca635be3:38085 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T22:48:57,951 DEBUG [RS:0;14efca635be3:41365 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T22:48:57,951 DEBUG [RS:2;14efca635be3:38085 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T22:48:57,951 DEBUG [RS:0;14efca635be3:41365 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 14efca635be3,41365,1733093336769 2024-12-01T22:48:57,951 DEBUG [RS:2;14efca635be3:38085 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 14efca635be3,38085,1733093336858 2024-12-01T22:48:57,952 DEBUG [RS:0;14efca635be3:41365 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '14efca635be3,41365,1733093336769' 2024-12-01T22:48:57,952 DEBUG [RS:2;14efca635be3:38085 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '14efca635be3,38085,1733093336858' 2024-12-01T22:48:57,952 DEBUG [RS:0;14efca635be3:41365 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T22:48:57,952 DEBUG [RS:2;14efca635be3:38085 {}] procedure.ZKProcedureMemberRpcs(134): Checking for 
aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T22:48:57,952 DEBUG [RS:0;14efca635be3:41365 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-01T22:48:57,952 DEBUG [RS:2;14efca635be3:38085 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-01T22:48:57,952 DEBUG [RS:0;14efca635be3:41365 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T22:48:57,952 INFO [RS:0;14efca635be3:41365 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-01T22:48:57,952 DEBUG [RS:2;14efca635be3:38085 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T22:48:57,952 INFO [RS:0;14efca635be3:41365 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-01T22:48:57,952 INFO [RS:2;14efca635be3:38085 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-01T22:48:57,952 INFO [RS:2;14efca635be3:38085 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-01T22:48:58,034 INFO [RS:1;14efca635be3:39869 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=14efca635be3%2C39869%2C1733093336813, suffix=, logDir=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/WALs/14efca635be3,39869,1733093336813, archiveDir=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/oldWALs, maxLogs=32 2024-12-01T22:48:58,036 INFO [RS:1;14efca635be3:39869 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 14efca635be3%2C39869%2C1733093336813.1733093338036 2024-12-01T22:48:58,047 INFO [RS:1;14efca635be3:39869 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/WALs/14efca635be3,39869,1733093336813/14efca635be3%2C39869%2C1733093336813.1733093338036 2024-12-01T22:48:58,050 DEBUG [RS:1;14efca635be3:39869 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33311:33311),(127.0.0.1/127.0.0.1:43707:43707),(127.0.0.1/127.0.0.1:35975:35975)] 2024-12-01T22:48:58,055 INFO [RS:0;14efca635be3:41365 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=14efca635be3%2C41365%2C1733093336769, suffix=, logDir=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/WALs/14efca635be3,41365,1733093336769, archiveDir=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/oldWALs, maxLogs=32 2024-12-01T22:48:58,055 INFO [RS:2;14efca635be3:38085 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=14efca635be3%2C38085%2C1733093336858, suffix=, logDir=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/WALs/14efca635be3,38085,1733093336858, archiveDir=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/oldWALs, maxLogs=32 2024-12-01T22:48:58,056 INFO [RS:2;14efca635be3:38085 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 14efca635be3%2C38085%2C1733093336858.1733093338056 2024-12-01T22:48:58,056 INFO [RS:0;14efca635be3:41365 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 14efca635be3%2C41365%2C1733093336769.1733093338056 2024-12-01T22:48:58,071 INFO [RS:0;14efca635be3:41365 
{}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/WALs/14efca635be3,41365,1733093336769/14efca635be3%2C41365%2C1733093336769.1733093338056 2024-12-01T22:48:58,072 DEBUG [RS:0;14efca635be3:41365 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33311:33311),(127.0.0.1/127.0.0.1:35975:35975),(127.0.0.1/127.0.0.1:43707:43707)] 2024-12-01T22:48:58,073 INFO [RS:2;14efca635be3:38085 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/WALs/14efca635be3,38085,1733093336858/14efca635be3%2C38085%2C1733093336858.1733093338056 2024-12-01T22:48:58,078 DEBUG [RS:2;14efca635be3:38085 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35975:35975),(127.0.0.1/127.0.0.1:43707:43707),(127.0.0.1/127.0.0.1:33311:33311)] 2024-12-01T22:48:58,184 DEBUG [14efca635be3:37735 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-01T22:48:58,185 DEBUG [14efca635be3:37735 {}] balancer.BalancerClusterState(204): Hosts are {14efca635be3=0} racks are {/default-rack=0} 2024-12-01T22:48:58,187 DEBUG [14efca635be3:37735 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T22:48:58,187 DEBUG [14efca635be3:37735 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T22:48:58,187 DEBUG [14efca635be3:37735 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-01T22:48:58,187 DEBUG [14efca635be3:37735 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T22:48:58,187 DEBUG [14efca635be3:37735 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T22:48:58,187 DEBUG [14efca635be3:37735 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-01T22:48:58,187 INFO [14efca635be3:37735 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T22:48:58,187 INFO [14efca635be3:37735 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T22:48:58,187 INFO [14efca635be3:37735 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-01T22:48:58,187 DEBUG [14efca635be3:37735 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T22:48:58,188 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=14efca635be3,39869,1733093336813 2024-12-01T22:48:58,190 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 14efca635be3,39869,1733093336813, state=OPENING 2024-12-01T22:48:58,241 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-01T22:48:58,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39869-0x1019315a1ac0002, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:58,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:58,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38085-0x1019315a1ac0003, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:58,251 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41365-0x1019315a1ac0001, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:58,252 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T22:48:58,252 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-01T22:48:58,252 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T22:48:58,252 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=14efca635be3,39869,1733093336813}] 2024-12-01T22:48:58,252 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T22:48:58,253 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T22:48:58,407 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-01T22:48:58,409 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52423, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-01T22:48:58,413 INFO [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-01T22:48:58,414 INFO [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T22:48:58,416 INFO [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=14efca635be3%2C39869%2C1733093336813.meta, suffix=.meta, logDir=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/WALs/14efca635be3,39869,1733093336813, archiveDir=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/oldWALs, maxLogs=32 2024-12-01T22:48:58,417 INFO [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 14efca635be3%2C39869%2C1733093336813.meta.1733093338417.meta 2024-12-01T22:48:58,430 INFO [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/WALs/14efca635be3,39869,1733093336813/14efca635be3%2C39869%2C1733093336813.meta.1733093338417.meta 2024-12-01T22:48:58,432 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:35975:35975),(127.0.0.1/127.0.0.1:43707:43707),(127.0.0.1/127.0.0.1:33311:33311)] 2024-12-01T22:48:58,436 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-01T22:48:58,437 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-01T22:48:58,437 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-01T22:48:58,437 INFO [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-01T22:48:58,437 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-01T22:48:58,437 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T22:48:58,437 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-01T22:48:58,437 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-01T22:48:58,440 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-01T22:48:58,442 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-01T22:48:58,442 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T22:48:58,443 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T22:48:58,443 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-01T22:48:58,444 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-01T22:48:58,444 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T22:48:58,445 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T22:48:58,445 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-01T22:48:58,446 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-01T22:48:58,446 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T22:48:58,447 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T22:48:58,447 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-01T22:48:58,448 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-01T22:48:58,448 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T22:48:58,449 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T22:48:58,449 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-01T22:48:58,450 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/hbase/meta/1588230740 2024-12-01T22:48:58,452 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/hbase/meta/1588230740 2024-12-01T22:48:58,453 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-01T22:48:58,453 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-01T22:48:58,454 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-01T22:48:58,455 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-01T22:48:58,456 INFO [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61413783, jitterRate=-0.0848633199930191}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T22:48:58,457 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-01T22:48:58,458 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733093338437Writing region info on filesystem at 1733093338437Initializing all the Stores at 1733093338439 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733093338439Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733093338440 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733093338440Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733093338440Cleaning up temporary data from old regions at 1733093338453 (+13 ms)Running coprocessor post-open hooks at 1733093338457 (+4 ms)Region opened successfully at 1733093338458 (+1 ms) 2024-12-01T22:48:58,460 INFO [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733093338406 2024-12-01T22:48:58,463 DEBUG [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-01T22:48:58,463 INFO [RS_OPEN_META-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-01T22:48:58,464 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=14efca635be3,39869,1733093336813 2024-12-01T22:48:58,466 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 14efca635be3,39869,1733093336813, state=OPEN 2024-12-01T22:48:58,472 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41365-0x1019315a1ac0001, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T22:48:58,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38085-0x1019315a1ac0003, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T22:48:58,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39869-0x1019315a1ac0002, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T22:48:58,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T22:48:58,473 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=14efca635be3,39869,1733093336813 2024-12-01T22:48:58,473 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T22:48:58,473 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T22:48:58,473 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T22:48:58,473 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T22:48:58,477 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-01T22:48:58,478 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=14efca635be3,39869,1733093336813 in 221 msec 2024-12-01T22:48:58,482 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-01T22:48:58,482 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 698 msec 2024-12-01T22:48:58,484 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-01T22:48:58,484 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-01T22:48:58,486 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T22:48:58,486 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=14efca635be3,39869,1733093336813, seqNum=-1] 2024-12-01T22:48:58,487 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T22:48:58,488 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-01T22:48:58,488 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47537, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T22:48:58,489 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-01T22:48:58,491 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-01T22:48:58,491 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-01T22:48:58,492 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-01T22:48:58,492 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-01T22:48:58,498 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 787 msec 2024-12-01T22:48:58,498 INFO [master/14efca635be3:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733093338498, completionTime=-1 2024-12-01T22:48:58,498 INFO [master/14efca635be3:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-01T22:48:58,498 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-01T22:48:58,501 INFO [master/14efca635be3:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-01T22:48:58,501 INFO [master/14efca635be3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733093398501 2024-12-01T22:48:58,501 INFO [master/14efca635be3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733093458501 2024-12-01T22:48:58,501 INFO [master/14efca635be3:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 3 msec 2024-12-01T22:48:58,502 INFO [master/14efca635be3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=14efca635be3,37735,1733093336598-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 
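
The entries above show the master publishing the hbase:meta location to ZooKeeper (MetaTableLocator) and clients fetching it back through the connection registry ("Start fetching meta region location from registry"). A client can observe the same location through the public API; a small sketch, assuming a reachable cluster configuration and using plain Connection/RegionLocator calls rather than this test's internals:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
            // Resolves to something like "region=hbase:meta,,1.1588230740, hostname=..., seqNum=..."
            HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
            System.out.println("meta is on " + loc.getServerName() + ", seqNum=" + loc.getSeqNum());
        }
    }
}
```
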
2024-12-01T22:48:58,502 INFO [master/14efca635be3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=14efca635be3,37735,1733093336598-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:58,502 INFO [master/14efca635be3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=14efca635be3,37735,1733093336598-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:58,502 INFO [master/14efca635be3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-14efca635be3:37735, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:58,502 INFO [master/14efca635be3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:58,504 INFO [master/14efca635be3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-01T22:48:58,505 DEBUG [master/14efca635be3:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-01T22:48:58,510 INFO [master/14efca635be3:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.595sec 2024-12-01T22:48:58,511 INFO [master/14efca635be3:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-01T22:48:58,511 INFO [master/14efca635be3:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-01T22:48:58,511 INFO [master/14efca635be3:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-01T22:48:58,511 INFO [master/14efca635be3:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-01T22:48:58,511 INFO [master/14efca635be3:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-01T22:48:58,511 INFO [master/14efca635be3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=14efca635be3,37735,1733093336598-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T22:48:58,511 INFO [master/14efca635be3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=14efca635be3,37735,1733093336598-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-01T22:48:58,514 DEBUG [master/14efca635be3:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-01T22:48:58,514 INFO [master/14efca635be3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-01T22:48:58,514 INFO [master/14efca635be3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=14efca635be3,37735,1733093336598-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
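
Several ScheduledChore instances (ClusterStatusChore, BalancerChore, CatalogJanitor, HbckChore, and so on) are registered with the master's ChoreService in the entries above. A rough sketch of the same periodic-task pattern using HBase's chore classes; these are server-internal types, so the constructor shapes below are an assumption based on recent HBase sources:

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
    public static void main(String[] args) throws InterruptedException {
        // Minimal stopper; real servers pass the server instance itself.
        Stoppable stopper = new Stoppable() {
            private volatile boolean stopped;
            @Override public void stop(String why) { stopped = true; }
            @Override public boolean isStopped() { return stopped; }
        };

        ChoreService service = new ChoreService("demo");
        // Runs chore() every 1000 ms until the stopper is stopped or the chore is cancelled.
        ScheduledChore chore = new ScheduledChore("DemoChore", stopper, 1000) {
            @Override protected void chore() {
                System.out.println("chore tick");
            }
        };
        service.scheduleChore(chore);

        Thread.sleep(3500);
        stopper.stop("done");
        service.shutdown();
    }
}
```
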
2024-12-01T22:48:58,581 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@438ad0b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T22:48:58,582 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 14efca635be3,37735,-1 for getting cluster id 2024-12-01T22:48:58,582 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T22:48:58,583 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b76492ec-1e47-4d8a-b334-5379c009efee' 2024-12-01T22:48:58,584 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T22:48:58,584 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b76492ec-1e47-4d8a-b334-5379c009efee" 2024-12-01T22:48:58,584 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@edcd13, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T22:48:58,584 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [14efca635be3,37735,-1] 2024-12-01T22:48:58,585 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T22:48:58,585 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T22:48:58,587 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58888, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T22:48:58,588 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ef9cf8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T22:48:58,589 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T22:48:58,590 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=14efca635be3,39869,1733093336813, seqNum=-1] 2024-12-01T22:48:58,591 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T22:48:58,593 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54232, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T22:48:58,596 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=14efca635be3,37735,1733093336598 2024-12-01T22:48:58,597 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-01T22:48:58,599 DEBUG 
[RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is 14efca635be3,37735,1733093336598 2024-12-01T22:48:58,599 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@702fa1f2 2024-12-01T22:48:58,599 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-01T22:48:58,601 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58892, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-01T22:48:58,602 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37735 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T22:48:58,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37735 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-01T22:48:58,606 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T22:48:58,607 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T22:48:58,607 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37735 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-01T22:48:58,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37735 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-01T22:48:58,615 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T22:48:58,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42687 is added to blk_1073741837_1013 (size=392) 2024-12-01T22:48:58,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741837_1013 (size=392) 2024-12-01T22:48:58,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41185 is added to blk_1073741837_1013 (size=392) 2024-12-01T22:48:58,634 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 72bf82c5af4139f05c1900136146326f, NAME => 'TestHBaseWalOnEC,,1733093338602.72bf82c5af4139f05c1900136146326f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599 2024-12-01T22:48:58,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741838_1014 (size=51) 2024-12-01T22:48:58,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42687 is added to blk_1073741838_1014 (size=51) 2024-12-01T22:48:58,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41185 is added to blk_1073741838_1014 (size=51) 2024-12-01T22:48:58,650 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733093338602.72bf82c5af4139f05c1900136146326f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T22:48:58,650 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 72bf82c5af4139f05c1900136146326f, disabling compactions & flushes 2024-12-01T22:48:58,650 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733093338602.72bf82c5af4139f05c1900136146326f. 2024-12-01T22:48:58,650 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733093338602.72bf82c5af4139f05c1900136146326f. 2024-12-01T22:48:58,650 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733093338602.72bf82c5af4139f05c1900136146326f. after waiting 0 ms 2024-12-01T22:48:58,650 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733093338602.72bf82c5af4139f05c1900136146326f. 2024-12-01T22:48:58,650 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733093338602.72bf82c5af4139f05c1900136146326f. 2024-12-01T22:48:58,651 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 72bf82c5af4139f05c1900136146326f: Waiting for close lock at 1733093338650Disabling compacts and flushes for region at 1733093338650Disabling writes for close at 1733093338650Writing region close event to WAL at 1733093338650Closed at 1733093338650 2024-12-01T22:48:58,653 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T22:48:58,653 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733093338602.72bf82c5af4139f05c1900136146326f.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733093338653"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733093338653"}]},"ts":"1733093338653"} 2024-12-01T22:48:58,657 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
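
The CreateTableProcedure above was triggered by the client's create request for 'TestHBaseWalOnEC' with a single 'cf' family (REGION_REPLICATION => '1', VERSIONS => '1'). A minimal client-side sketch of issuing the same request with the standard Admin/TableDescriptorBuilder API; table and family names are copied from the log, everything else is left at defaults:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName name = TableName.valueOf("TestHBaseWalOnEC");
            admin.createTable(TableDescriptorBuilder.newBuilder(name)
                .setRegionReplication(1) // REGION_REPLICATION => '1', as in the log
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                    .setMaxVersions(1)   // VERSIONS => '1'
                    .build())
                .build());
            System.out.println("created: " + admin.tableExists(name));
        }
    }
}
```
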
2024-12-01T22:48:58,659 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T22:48:58,659 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733093338659"}]},"ts":"1733093338659"} 2024-12-01T22:48:58,662 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-01T22:48:58,663 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {14efca635be3=0} racks are {/default-rack=0} 2024-12-01T22:48:58,664 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T22:48:58,664 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T22:48:58,664 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-01T22:48:58,664 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T22:48:58,664 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T22:48:58,664 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-01T22:48:58,664 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T22:48:58,664 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T22:48:58,664 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-01T22:48:58,664 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T22:48:58,665 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=72bf82c5af4139f05c1900136146326f, ASSIGN}] 2024-12-01T22:48:58,667 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=72bf82c5af4139f05c1900136146326f, ASSIGN 2024-12-01T22:48:58,669 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=72bf82c5af4139f05c1900136146326f, ASSIGN; state=OFFLINE, location=14efca635be3,39869,1733093336813; forceNewPlan=false, retain=false 2024-12-01T22:48:58,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37735 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-01T22:48:58,819 INFO [14efca635be3:37735 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
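
At this point the balancer has seen three servers on a single host and rack and produced a one-region plan ("Reassigned 1 regions"), which is handed to an OpenRegionProcedure below. A small sketch of how client code can confirm where the region lands once assignment completes; these are plain Admin/RegionLocator calls, not the balancer internals:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionAssignmentSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName name = TableName.valueOf("TestHBaseWalOnEC");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin();
             RegionLocator locator = conn.getRegionLocator(name)) {
            // Block until every region of the table is open somewhere.
            while (!admin.isTableAvailable(name)) {
                Thread.sleep(100);
            }
            for (HRegionLocation loc : locator.getAllRegionLocations()) {
                System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
            }
        }
    }
}
```
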
2024-12-01T22:48:58,820 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=72bf82c5af4139f05c1900136146326f, regionState=OPENING, regionLocation=14efca635be3,39869,1733093336813 2024-12-01T22:48:58,823 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=72bf82c5af4139f05c1900136146326f, ASSIGN because future has completed 2024-12-01T22:48:58,824 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 72bf82c5af4139f05c1900136146326f, server=14efca635be3,39869,1733093336813}] 2024-12-01T22:48:58,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37735 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-01T22:48:58,984 INFO [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733093338602.72bf82c5af4139f05c1900136146326f. 2024-12-01T22:48:58,984 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 72bf82c5af4139f05c1900136146326f, NAME => 'TestHBaseWalOnEC,,1733093338602.72bf82c5af4139f05c1900136146326f.', STARTKEY => '', ENDKEY => ''} 2024-12-01T22:48:58,985 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 72bf82c5af4139f05c1900136146326f 2024-12-01T22:48:58,985 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733093338602.72bf82c5af4139f05c1900136146326f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T22:48:58,985 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 72bf82c5af4139f05c1900136146326f 2024-12-01T22:48:58,986 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 72bf82c5af4139f05c1900136146326f 2024-12-01T22:48:58,988 INFO [StoreOpener-72bf82c5af4139f05c1900136146326f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 72bf82c5af4139f05c1900136146326f 2024-12-01T22:48:58,990 INFO [StoreOpener-72bf82c5af4139f05c1900136146326f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 72bf82c5af4139f05c1900136146326f columnFamilyName cf 2024-12-01T22:48:58,990 DEBUG [StoreOpener-72bf82c5af4139f05c1900136146326f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T22:48:58,991 INFO [StoreOpener-72bf82c5af4139f05c1900136146326f-1 {}] regionserver.HStore(327): Store=72bf82c5af4139f05c1900136146326f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T22:48:58,991 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 72bf82c5af4139f05c1900136146326f 2024-12-01T22:48:58,992 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/default/TestHBaseWalOnEC/72bf82c5af4139f05c1900136146326f 2024-12-01T22:48:58,993 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/default/TestHBaseWalOnEC/72bf82c5af4139f05c1900136146326f 2024-12-01T22:48:58,994 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 72bf82c5af4139f05c1900136146326f 2024-12-01T22:48:58,994 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 72bf82c5af4139f05c1900136146326f 2024-12-01T22:48:58,996 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 72bf82c5af4139f05c1900136146326f 2024-12-01T22:48:59,003 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/default/TestHBaseWalOnEC/72bf82c5af4139f05c1900136146326f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T22:48:59,004 INFO [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 72bf82c5af4139f05c1900136146326f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66498397, jitterRate=-0.009096667170524597}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T22:48:59,004 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 72bf82c5af4139f05c1900136146326f 2024-12-01T22:48:59,005 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 72bf82c5af4139f05c1900136146326f: Running coprocessor pre-open hook at 1733093338986Writing region info on filesystem at 1733093338986Initializing all the Stores at 1733093338988 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733093338988Cleaning up temporary data from old regions at 1733093338994 (+6 ms)Running coprocessor post-open hooks at 1733093339004 (+10 ms)Region opened successfully at 1733093339005 (+1 ms) 2024-12-01T22:48:59,007 INFO [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733093338602.72bf82c5af4139f05c1900136146326f., pid=6, masterSystemTime=1733093338978 2024-12-01T22:48:59,011 DEBUG [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733093338602.72bf82c5af4139f05c1900136146326f. 2024-12-01T22:48:59,011 INFO [RS_OPEN_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733093338602.72bf82c5af4139f05c1900136146326f. 2024-12-01T22:48:59,012 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=72bf82c5af4139f05c1900136146326f, regionState=OPEN, openSeqNum=2, regionLocation=14efca635be3,39869,1733093336813 2024-12-01T22:48:59,017 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 72bf82c5af4139f05c1900136146326f, server=14efca635be3,39869,1733093336813 because future has completed 2024-12-01T22:48:59,024 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-01T22:48:59,024 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 72bf82c5af4139f05c1900136146326f, server=14efca635be3,39869,1733093336813 in 195 msec 2024-12-01T22:48:59,029 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-01T22:48:59,029 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=72bf82c5af4139f05c1900136146326f, ASSIGN in 360 msec 2024-12-01T22:48:59,031 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T22:48:59,031 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733093339031"}]},"ts":"1733093339031"} 2024-12-01T22:48:59,035 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-01T22:48:59,036 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T22:48:59,039 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 434 msec 
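
The table is now fully created (pid=4 SUCCESS in 434 msec). The flush that follows below covers a single small cell keyed row/cf:cq (dataSize=32 B), so the test evidently wrote one Put first. A sketch of that write with the standard Table API; the row, family, and qualifier are taken from the flush entry, while the value is an assumption:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutRowSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestHBaseWalOnEC"))) {
            Put put = new Put(Bytes.toBytes("row"));
            put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value"));
            table.put(put); // lands in the memstore and the WAL, flushed to an HFile below
        }
    }
}
```
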
2024-12-01T22:48:59,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37735 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-01T22:48:59,236 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-01T22:48:59,236 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-01T22:48:59,236 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T22:48:59,240 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-01T22:48:59,240 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T22:48:59,241 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-01T22:48:59,244 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733093338602.72bf82c5af4139f05c1900136146326f., hostname=14efca635be3,39869,1733093336813, seqNum=2] 2024-12-01T22:48:59,248 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37735 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-01T22:48:59,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37735 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-01T22:48:59,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37735 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-01T22:48:59,252 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-01T22:48:59,253 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-01T22:48:59,253 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-01T22:48:59,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37735 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-01T22:48:59,407 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39869 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-01T22:48:59,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733093338602.72bf82c5af4139f05c1900136146326f. 
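
The FlushTableProcedure above (pid=7, with its FlushRegionProcedure child pid=8) was requested by the client after the write. The equivalent call from client code is a single Admin method; a sketch assuming the same table name:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush every region of the table; memstore contents
            // are written out as HFiles (see the DefaultStoreFlusher entries below).
            admin.flush(TableName.valueOf("TestHBaseWalOnEC"));
        }
    }
}
```
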
2024-12-01T22:48:59,408 INFO [RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 72bf82c5af4139f05c1900136146326f 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-01T22:48:59,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/default/TestHBaseWalOnEC/72bf82c5af4139f05c1900136146326f/.tmp/cf/1dc407b4b79b469ab3bebf5b5afca229 is 36, key is row/cf:cq/1733093339245/Put/seqid=0 2024-12-01T22:48:59,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741839_1015 (size=4787) 2024-12-01T22:48:59,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42687 is added to blk_1073741839_1015 (size=4787) 2024-12-01T22:48:59,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41185 is added to blk_1073741839_1015 (size=4787) 2024-12-01T22:48:59,439 INFO [RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/default/TestHBaseWalOnEC/72bf82c5af4139f05c1900136146326f/.tmp/cf/1dc407b4b79b469ab3bebf5b5afca229 2024-12-01T22:48:59,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/default/TestHBaseWalOnEC/72bf82c5af4139f05c1900136146326f/.tmp/cf/1dc407b4b79b469ab3bebf5b5afca229 as hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/default/TestHBaseWalOnEC/72bf82c5af4139f05c1900136146326f/cf/1dc407b4b79b469ab3bebf5b5afca229 2024-12-01T22:48:59,455 INFO [RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/default/TestHBaseWalOnEC/72bf82c5af4139f05c1900136146326f/cf/1dc407b4b79b469ab3bebf5b5afca229, entries=1, sequenceid=5, filesize=4.7 K 2024-12-01T22:48:59,457 INFO [RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 72bf82c5af4139f05c1900136146326f in 49ms, sequenceid=5, compaction requested=false 2024-12-01T22:48:59,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 72bf82c5af4139f05c1900136146326f: 2024-12-01T22:48:59,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733093338602.72bf82c5af4139f05c1900136146326f. 
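
With the flush committed (one ~4.7 K HFile holding the single cell, replicated to three datanodes in the block reports above), the row is durable on HDFS and still readable through the normal client path. A small read-back sketch with Get; the expected value mirrors the assumed Put shown earlier:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class GetRowSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestHBaseWalOnEC"))) {
            Result result = table.get(new Get(Bytes.toBytes("row")));
            byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("cq"));
            System.out.println("cf:cq = " + (value == null ? "<missing>" : Bytes.toString(value)));
        }
    }
}
```
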
2024-12-01T22:48:59,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/14efca635be3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-01T22:48:59,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37735 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-01T22:48:59,464 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-01T22:48:59,464 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 207 msec 2024-12-01T22:48:59,468 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 217 msec 2024-12-01T22:48:59,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37735 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-01T22:48:59,566 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-01T22:48:59,570 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-01T22:48:59,570 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-01T22:48:59,570 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T22:48:59,570 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T22:48:59,570 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T22:48:59,571 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T22:48:59,571 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-01T22:48:59,571 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1096291619, stopped=false 2024-12-01T22:48:59,571 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=14efca635be3,37735,1733093336598 2024-12-01T22:48:59,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38085-0x1019315a1ac0003, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T22:48:59,630 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41365-0x1019315a1ac0001, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T22:48:59,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T22:48:59,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39869-0x1019315a1ac0002, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T22:48:59,631 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-01T22:48:59,631 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41365-0x1019315a1ac0001, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:59,631 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38085-0x1019315a1ac0003, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:59,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:59,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39869-0x1019315a1ac0002, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:48:59,631 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-01T22:48:59,631 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T22:48:59,631 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T22:48:59,631 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '14efca635be3,41365,1733093336769' ***** 2024-12-01T22:48:59,631 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-01T22:48:59,631 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '14efca635be3,39869,1733093336813' ***** 2024-12-01T22:48:59,631 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-01T22:48:59,631 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '14efca635be3,38085,1733093336858' ***** 2024-12-01T22:48:59,632 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-01T22:48:59,632 INFO [RS:1;14efca635be3:39869 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T22:48:59,632 INFO [RS:2;14efca635be3:38085 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T22:48:59,632 INFO [RS:0;14efca635be3:41365 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T22:48:59,632 INFO [RS:0;14efca635be3:41365 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-01T22:48:59,632 INFO [RS:0;14efca635be3:41365 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-01T22:48:59,632 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-01T22:48:59,632 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-01T22:48:59,632 INFO [RS:0;14efca635be3:41365 {}] regionserver.HRegionServer(959): stopping server 14efca635be3,41365,1733093336769 2024-12-01T22:48:59,632 INFO [RS:2;14efca635be3:38085 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-01T22:48:59,632 INFO [RS:1;14efca635be3:39869 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-12-01T22:48:59,632 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41365-0x1019315a1ac0001, quorum=127.0.0.1:55536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T22:48:59,633 INFO [RS:0;14efca635be3:41365 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T22:48:59,633 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39869-0x1019315a1ac0002, quorum=127.0.0.1:55536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T22:48:59,633 INFO [RS:2;14efca635be3:38085 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-01T22:48:59,632 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T22:48:59,633 INFO [RS:1;14efca635be3:39869 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-01T22:48:59,632 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-01T22:48:59,632 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38085-0x1019315a1ac0003, quorum=127.0.0.1:55536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T22:48:59,633 INFO [RS:1;14efca635be3:39869 {}] regionserver.HRegionServer(3091): Received CLOSE for 72bf82c5af4139f05c1900136146326f 2024-12-01T22:48:59,633 INFO [RS:2;14efca635be3:38085 {}] regionserver.HRegionServer(959): stopping server 14efca635be3,38085,1733093336858 2024-12-01T22:48:59,633 INFO [RS:0;14efca635be3:41365 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;14efca635be3:41365. 
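
During shutdown, each process receives a NodeDeleted event for /hbase/running and then re-arms a watch on the now-missing znode (the ZKUtil "Set watcher on znode that does not yet exist" entries above). The underlying pattern is an ordinary ZooKeeper existence watch; a sketch against the raw ZooKeeper client, reusing the quorum address from this log, and not the ZKWatcher wrapper itself:

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZnodeWatchSketch {
    public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        CountDownLatch deleted = new CountDownLatch(1);
        Watcher watcher = (WatchedEvent event) -> {
            if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                connected.countDown();
            }
            if (event.getType() == Watcher.Event.EventType.NodeDeleted
                && "/hbase/running".equals(event.getPath())) {
                deleted.countDown(); // cluster shutdown was requested
            }
        };
        ZooKeeper zk = new ZooKeeper("127.0.0.1:55536", 30_000, watcher);
        connected.await();
        // exists() with watch=true arms a one-shot watch whether or not the znode is present.
        zk.exists("/hbase/running", true);
        deleted.await();
        zk.close();
    }
}
```
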
2024-12-01T22:48:59,633 INFO [RS:2;14efca635be3:38085 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T22:48:59,633 INFO [RS:1;14efca635be3:39869 {}] regionserver.HRegionServer(959): stopping server 14efca635be3,39869,1733093336813 2024-12-01T22:48:59,633 DEBUG [RS:0;14efca635be3:41365 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T22:48:59,633 INFO [RS:1;14efca635be3:39869 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T22:48:59,633 DEBUG [RS:0;14efca635be3:41365 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T22:48:59,633 INFO [RS:1;14efca635be3:39869 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;14efca635be3:39869. 2024-12-01T22:48:59,633 INFO [RS:0;14efca635be3:41365 {}] regionserver.HRegionServer(976): stopping server 14efca635be3,41365,1733093336769; all regions closed. 
2024-12-01T22:48:59,633 DEBUG [RS:1;14efca635be3:39869 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T22:48:59,633 DEBUG [RS:1;14efca635be3:39869 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T22:48:59,634 INFO [RS:1;14efca635be3:39869 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T22:48:59,634 INFO [RS:1;14efca635be3:39869 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-01T22:48:59,634 INFO [RS:1;14efca635be3:39869 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-01T22:48:59,634 INFO [RS:1;14efca635be3:39869 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-01T22:48:59,635 INFO [RS:1;14efca635be3:39869 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-01T22:48:59,635 INFO [RS:2;14efca635be3:38085 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;14efca635be3:38085. 
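The two DEBUG "Call stack" dumps above record where AsyncConnectionImpl.close() was invoked while each region server shut down its internal cluster connection (HBaseServerBase.closeClusterConnection). The same close() is part of the public client API; a minimal client-side sketch follows. The table name is taken from the test (TestHBaseWalOnEC), the row key is made up for illustration, and a reachable cluster configuration on the classpath is assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class AsyncConnectionCloseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // createAsyncConnection returns a CompletableFuture; get() waits for it.
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      Result result = conn.getTable(TableName.valueOf("TestHBaseWalOnEC"))
          .get(new Get(Bytes.toBytes("row0")))
          .get(); // block until the asynchronous RPC completes
      System.out.println("fetched " + result.size() + " cells");
    } // try-with-resources calls AsyncConnection.close(), the method visible in the stacks above
  }
}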
2024-12-01T22:48:59,635 DEBUG [RS:1;14efca635be3:39869 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 72bf82c5af4139f05c1900136146326f=TestHBaseWalOnEC,,1733093338602.72bf82c5af4139f05c1900136146326f.} 2024-12-01T22:48:59,635 DEBUG [RS:2;14efca635be3:38085 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T22:48:59,635 DEBUG [RS:1;14efca635be3:39869 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 72bf82c5af4139f05c1900136146326f 2024-12-01T22:48:59,635 DEBUG [RS:2;14efca635be3:38085 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T22:48:59,635 DEBUG [RS_CLOSE_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 72bf82c5af4139f05c1900136146326f, disabling compactions & flushes 2024-12-01T22:48:59,635 INFO [RS:2;14efca635be3:38085 {}] regionserver.HRegionServer(976): stopping server 14efca635be3,38085,1733093336858; all regions closed. 2024-12-01T22:48:59,635 INFO [RS_CLOSE_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733093338602.72bf82c5af4139f05c1900136146326f. 2024-12-01T22:48:59,635 DEBUG [RS_CLOSE_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733093338602.72bf82c5af4139f05c1900136146326f. 2024-12-01T22:48:59,635 DEBUG [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-01T22:48:59,635 DEBUG [RS_CLOSE_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733093338602.72bf82c5af4139f05c1900136146326f. 
after waiting 0 ms 2024-12-01T22:48:59,635 INFO [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-01T22:48:59,635 DEBUG [RS_CLOSE_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733093338602.72bf82c5af4139f05c1900136146326f. 2024-12-01T22:48:59,635 DEBUG [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-01T22:48:59,635 DEBUG [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T22:48:59,636 DEBUG [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-01T22:48:59,636 INFO [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-01T22:48:59,636 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T22:48:59,636 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T22:48:59,639 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T22:48:59,639 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T22:48:59,639 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T22:48:59,639 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T22:48:59,640 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T22:48:59,640 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T22:48:59,640 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T22:48:59,640 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T22:48:59,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41185 is added to blk_1073741834_1010 (size=93) 2024-12-01T22:48:59,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42687 is added to blk_1073741835_1011 (size=93) 2024-12-01T22:48:59,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741834_1010 (size=93) 2024-12-01T22:48:59,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741835_1011 (size=93) 2024-12-01T22:48:59,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41185 is added to blk_1073741835_1011 (size=93) 2024-12-01T22:48:59,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42687 is added to blk_1073741834_1010 (size=93) 2024-12-01T22:48:59,648 DEBUG [RS_CLOSE_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/default/TestHBaseWalOnEC/72bf82c5af4139f05c1900136146326f/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-01T22:48:59,650 INFO [RS_CLOSE_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed 
TestHBaseWalOnEC,,1733093338602.72bf82c5af4139f05c1900136146326f. 2024-12-01T22:48:59,650 DEBUG [RS_CLOSE_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 72bf82c5af4139f05c1900136146326f: Waiting for close lock at 1733093339635Running coprocessor pre-close hooks at 1733093339635Disabling compacts and flushes for region at 1733093339635Disabling writes for close at 1733093339635Writing region close event to WAL at 1733093339637 (+2 ms)Running coprocessor post-close hooks at 1733093339649 (+12 ms)Closed at 1733093339650 (+1 ms) 2024-12-01T22:48:59,650 DEBUG [RS_CLOSE_REGION-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733093338602.72bf82c5af4139f05c1900136146326f. 2024-12-01T22:48:59,678 DEBUG [RS:0;14efca635be3:41365 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/oldWALs 2024-12-01T22:48:59,679 INFO [RS:0;14efca635be3:41365 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 14efca635be3%2C41365%2C1733093336769:(num 1733093338056) 2024-12-01T22:48:59,679 DEBUG [RS:0;14efca635be3:41365 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T22:48:59,679 INFO [RS:0;14efca635be3:41365 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T22:48:59,679 INFO [RS:0;14efca635be3:41365 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T22:48:59,679 INFO [RS:0;14efca635be3:41365 {}] hbase.ChoreService(370): Chore service for: regionserver/14efca635be3:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-01T22:48:59,679 DEBUG [RS:2;14efca635be3:38085 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/oldWALs 2024-12-01T22:48:59,679 INFO [RS:0;14efca635be3:41365 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T22:48:59,679 INFO [RS:2;14efca635be3:38085 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 14efca635be3%2C38085%2C1733093336858:(num 1733093338056) 2024-12-01T22:48:59,679 INFO [RS:0;14efca635be3:41365 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-01T22:48:59,679 DEBUG [RS:2;14efca635be3:38085 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T22:48:59,679 INFO [RS:0;14efca635be3:41365 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-01T22:48:59,679 INFO [RS:2;14efca635be3:38085 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T22:48:59,679 INFO [RS:0;14efca635be3:41365 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T22:48:59,679 INFO [regionserver/14efca635be3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-01T22:48:59,679 INFO [RS:0;14efca635be3:41365 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41365 2024-12-01T22:48:59,680 INFO [RS:2;14efca635be3:38085 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T22:48:59,680 INFO [RS:2;14efca635be3:38085 {}] hbase.ChoreService(370): Chore service for: regionserver/14efca635be3:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-01T22:48:59,681 INFO [RS:2;14efca635be3:38085 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T22:48:59,681 INFO [regionserver/14efca635be3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-01T22:48:59,681 INFO [RS:2;14efca635be3:38085 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-01T22:48:59,681 INFO [RS:2;14efca635be3:38085 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-01T22:48:59,681 INFO [RS:2;14efca635be3:38085 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T22:48:59,681 INFO [RS:2;14efca635be3:38085 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38085 2024-12-01T22:48:59,683 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41365-0x1019315a1ac0001, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/14efca635be3,41365,1733093336769 2024-12-01T22:48:59,683 INFO [RS:0;14efca635be3:41365 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-01T22:48:59,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T22:48:59,688 DEBUG [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/hbase/meta/1588230740/.tmp/info/db6d844f4c1c4be5a59516e07b1b7339 is 153, key is TestHBaseWalOnEC,,1733093338602.72bf82c5af4139f05c1900136146326f./info:regioninfo/1733093339012/Put/seqid=0 2024-12-01T22:48:59,693 INFO [RS:2;14efca635be3:38085 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-01T22:48:59,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38085-0x1019315a1ac0003, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/14efca635be3,38085,1733093336858 2024-12-01T22:48:59,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741840_1016 (size=6637) 2024-12-01T22:48:59,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42687 is added to blk_1073741840_1016 (size=6637) 2024-12-01T22:48:59,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41185 is added to blk_1073741840_1016 (size=6637) 2024-12-01T22:48:59,703 INFO [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at 
sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/hbase/meta/1588230740/.tmp/info/db6d844f4c1c4be5a59516e07b1b7339 2024-12-01T22:48:59,704 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [14efca635be3,41365,1733093336769] 2024-12-01T22:48:59,705 INFO [regionserver/14efca635be3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T22:48:59,711 INFO [regionserver/14efca635be3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T22:48:59,711 INFO [regionserver/14efca635be3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T22:48:59,725 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/14efca635be3,41365,1733093336769 already deleted, retry=false 2024-12-01T22:48:59,725 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 14efca635be3,41365,1733093336769 expired; onlineServers=2 2024-12-01T22:48:59,725 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [14efca635be3,38085,1733093336858] 2024-12-01T22:48:59,733 DEBUG [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/hbase/meta/1588230740/.tmp/ns/3b0e0428fe7343c1b54102ea1475bbce is 43, key is default/ns:d/1733093338489/Put/seqid=0 2024-12-01T22:48:59,735 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/14efca635be3,38085,1733093336858 already deleted, retry=false 2024-12-01T22:48:59,735 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 14efca635be3,38085,1733093336858 expired; onlineServers=1 2024-12-01T22:48:59,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741841_1017 (size=5153) 2024-12-01T22:48:59,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42687 is added to blk_1073741841_1017 (size=5153) 2024-12-01T22:48:59,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41185 is added to blk_1073741841_1017 (size=5153) 2024-12-01T22:48:59,743 INFO [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/hbase/meta/1588230740/.tmp/ns/3b0e0428fe7343c1b54102ea1475bbce 2024-12-01T22:48:59,773 DEBUG [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/hbase/meta/1588230740/.tmp/table/9d215305dcd348268f1175db886fe3ed is 52, key is TestHBaseWalOnEC/table:state/1733093339031/Put/seqid=0 2024-12-01T22:48:59,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741842_1018 (size=5249) 2024-12-01T22:48:59,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42687 is 
added to blk_1073741842_1018 (size=5249) 2024-12-01T22:48:59,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41185 is added to blk_1073741842_1018 (size=5249) 2024-12-01T22:48:59,784 INFO [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/hbase/meta/1588230740/.tmp/table/9d215305dcd348268f1175db886fe3ed 2024-12-01T22:48:59,794 DEBUG [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/hbase/meta/1588230740/.tmp/info/db6d844f4c1c4be5a59516e07b1b7339 as hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/hbase/meta/1588230740/info/db6d844f4c1c4be5a59516e07b1b7339 2024-12-01T22:48:59,804 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41365-0x1019315a1ac0001, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T22:48:59,804 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41365-0x1019315a1ac0001, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T22:48:59,804 INFO [RS:0;14efca635be3:41365 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-01T22:48:59,804 INFO [RS:0;14efca635be3:41365 {}] regionserver.HRegionServer(1031): Exiting; stopping=14efca635be3,41365,1733093336769; zookeeper connection closed. 
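Much of the shutdown choreography above is coordinated through ZooKeeper: earlier entries set watchers on znodes that do not yet exist (/hbase/running), the master's RegionServerTracker reacts to NodeDeleted events as each server's ephemeral node under /hbase/rs disappears, and the "state=Closed ... zookeeper connection closed" entries mark each session ending. A minimal sketch of the underlying watch mechanism with the plain ZooKeeper client follows; the quorum address is copied from the log and the latch-based wiring is illustrative only.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch fired = new CountDownLatch(1);
    // Quorum address as it appears in the log; the session timeout is arbitrary here.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:55536", 30_000, event -> { });

    Watcher watcher = (WatchedEvent event) -> {
      // NodeCreated/NodeDeleted notifications arrive here, e.g. when an
      // ephemeral /hbase/rs/<server> node vanishes as a region server stops.
      System.out.println(event.getType() + " on " + event.getPath());
      fired.countDown();
    };

    // exists() registers a one-shot watch even when the znode is absent,
    // which is what the "Set watcher on znode that does not yet exist" lines show.
    zk.exists("/hbase/running", watcher);

    fired.await(); // wait for the first notification, then release the session
    zk.close();
  }
}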
2024-12-01T22:48:59,804 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@364a8c8f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@364a8c8f 2024-12-01T22:48:59,806 INFO [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/hbase/meta/1588230740/info/db6d844f4c1c4be5a59516e07b1b7339, entries=10, sequenceid=11, filesize=6.5 K 2024-12-01T22:48:59,808 DEBUG [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/hbase/meta/1588230740/.tmp/ns/3b0e0428fe7343c1b54102ea1475bbce as hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/hbase/meta/1588230740/ns/3b0e0428fe7343c1b54102ea1475bbce 2024-12-01T22:48:59,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38085-0x1019315a1ac0003, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T22:48:59,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38085-0x1019315a1ac0003, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T22:48:59,815 INFO [RS:2;14efca635be3:38085 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-01T22:48:59,815 INFO [RS:2;14efca635be3:38085 {}] regionserver.HRegionServer(1031): Exiting; stopping=14efca635be3,38085,1733093336858; zookeeper connection closed. 2024-12-01T22:48:59,815 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@565fa7e3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@565fa7e3 2024-12-01T22:48:59,818 INFO [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/hbase/meta/1588230740/ns/3b0e0428fe7343c1b54102ea1475bbce, entries=2, sequenceid=11, filesize=5.0 K 2024-12-01T22:48:59,819 DEBUG [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/hbase/meta/1588230740/.tmp/table/9d215305dcd348268f1175db886fe3ed as hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/hbase/meta/1588230740/table/9d215305dcd348268f1175db886fe3ed 2024-12-01T22:48:59,829 INFO [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/hbase/meta/1588230740/table/9d215305dcd348268f1175db886fe3ed, entries=2, sequenceid=11, filesize=5.1 K 2024-12-01T22:48:59,830 INFO [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 194ms, sequenceid=11, compaction requested=false 2024-12-01T22:48:59,835 DEBUG [RS:1;14efca635be3:39869 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 
2024-12-01T22:48:59,840 DEBUG [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-01T22:48:59,848 DEBUG [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-01T22:48:59,848 INFO [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-01T22:48:59,849 DEBUG [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733093339635Running coprocessor pre-close hooks at 1733093339635Disabling compacts and flushes for region at 1733093339635Disabling writes for close at 1733093339636 (+1 ms)Obtaining lock to block concurrent updates at 1733093339636Preparing flush snapshotting stores in 1588230740 at 1733093339636Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733093339636Flushing stores of hbase:meta,,1.1588230740 at 1733093339638 (+2 ms)Flushing 1588230740/info: creating writer at 1733093339638Flushing 1588230740/info: appending metadata at 1733093339687 (+49 ms)Flushing 1588230740/info: closing flushed file at 1733093339687Flushing 1588230740/ns: creating writer at 1733093339711 (+24 ms)Flushing 1588230740/ns: appending metadata at 1733093339732 (+21 ms)Flushing 1588230740/ns: closing flushed file at 1733093339732Flushing 1588230740/table: creating writer at 1733093339752 (+20 ms)Flushing 1588230740/table: appending metadata at 1733093339772 (+20 ms)Flushing 1588230740/table: closing flushed file at 1733093339772Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2054996a: reopening flushed file at 1733093339792 (+20 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5cebe2cd: reopening flushed file at 1733093339806 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7c9f7bdb: reopening flushed file at 1733093339818 (+12 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 194ms, sequenceid=11, compaction requested=false at 1733093339831 (+13 ms)Writing region close event to WAL at 1733093339832 (+1 ms)Running coprocessor post-close hooks at 1733093339841 (+9 ms)Closed at 1733093339848 (+7 ms) 2024-12-01T22:48:59,849 DEBUG [RS_CLOSE_META-regionserver/14efca635be3:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-01T22:48:59,994 INFO [regionserver/14efca635be3:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-01T22:48:59,994 INFO [regionserver/14efca635be3:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-01T22:49:00,036 INFO [RS:1;14efca635be3:39869 {}] regionserver.HRegionServer(976): stopping server 14efca635be3,39869,1733093336813; all regions closed. 
2024-12-01T22:49:00,036 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T22:49:00,036 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T22:49:00,036 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T22:49:00,037 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T22:49:00,037 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T22:49:00,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42687 is added to blk_1073741836_1012 (size=2751) 2024-12-01T22:49:00,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41185 is added to blk_1073741836_1012 (size=2751) 2024-12-01T22:49:00,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741836_1012 (size=2751) 2024-12-01T22:49:00,043 DEBUG [RS:1;14efca635be3:39869 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/oldWALs 2024-12-01T22:49:00,043 INFO [RS:1;14efca635be3:39869 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 14efca635be3%2C39869%2C1733093336813.meta:.meta(num 1733093338417) 2024-12-01T22:49:00,044 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T22:49:00,044 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T22:49:00,044 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T22:49:00,044 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T22:49:00,044 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T22:49:00,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741833_1009 (size=1298) 2024-12-01T22:49:00,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42687 is added to blk_1073741833_1009 (size=1298) 2024-12-01T22:49:00,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41185 is added to blk_1073741833_1009 (size=1298) 2024-12-01T22:49:00,053 DEBUG [RS:1;14efca635be3:39869 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/oldWALs 2024-12-01T22:49:00,053 INFO [RS:1;14efca635be3:39869 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 14efca635be3%2C39869%2C1733093336813:(num 1733093338036) 2024-12-01T22:49:00,053 DEBUG [RS:1;14efca635be3:39869 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T22:49:00,053 INFO [RS:1;14efca635be3:39869 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T22:49:00,053 INFO [RS:1;14efca635be3:39869 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T22:49:00,053 INFO [RS:1;14efca635be3:39869 {}] hbase.ChoreService(370): Chore service for: regionserver/14efca635be3:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-01T22:49:00,053 INFO [RS:1;14efca635be3:39869 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T22:49:00,053 INFO [regionserver/14efca635be3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-01T22:49:00,054 INFO [RS:1;14efca635be3:39869 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39869 2024-12-01T22:49:00,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T22:49:00,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39869-0x1019315a1ac0002, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/14efca635be3,39869,1733093336813 2024-12-01T22:49:00,104 INFO [RS:1;14efca635be3:39869 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-01T22:49:00,104 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$367/0x00007fdc708f53d0@7314b79c rejected from java.util.concurrent.ThreadPoolExecutor@3616279a[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-12-01T22:49:00,114 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [14efca635be3,39869,1733093336813] 2024-12-01T22:49:00,125 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/14efca635be3,39869,1733093336813 already deleted, retry=false 2024-12-01T22:49:00,125 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 14efca635be3,39869,1733093336813 expired; onlineServers=0 2024-12-01T22:49:00,125 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '14efca635be3,37735,1733093336598' ***** 2024-12-01T22:49:00,125 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-01T22:49:00,125 INFO [M:0;14efca635be3:37735 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T22:49:00,125 INFO [M:0;14efca635be3:37735 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T22:49:00,125 DEBUG [M:0;14efca635be3:37735 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-01T22:49:00,126 DEBUG [M:0;14efca635be3:37735 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-01T22:49:00,126 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
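The ERROR above is a late ZooKeeper notification arriving after the ZKWatcher's event executor had already terminated, so the pool's default AbortPolicy threw RejectedExecutionException. The failure mode is generic JDK behaviour and easy to reproduce in isolation; the sketch below uses nothing but java.util.concurrent.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;

public class RejectedAfterShutdownSketch {
  public static void main(String[] args) {
    ExecutorService pool = Executors.newSingleThreadExecutor();
    pool.execute(() -> System.out.println("accepted while the pool is running"));

    pool.shutdown();

    try {
      // A shut-down (or fully terminated) pool rejects new tasks via
      // AbortPolicy, the same exception the ZKWatcher logged above.
      pool.execute(() -> System.out.println("never runs"));
    } catch (RejectedExecutionException e) {
      System.out.println("rejected: " + e);
    }
  }
}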
2024-12-01T22:49:00,126 DEBUG [master/14efca635be3:0:becomeActiveMaster-HFileCleaner.large.0-1733093337717 {}] cleaner.HFileCleaner(306): Exit Thread[master/14efca635be3:0:becomeActiveMaster-HFileCleaner.large.0-1733093337717,5,FailOnTimeoutGroup] 2024-12-01T22:49:00,126 DEBUG [master/14efca635be3:0:becomeActiveMaster-HFileCleaner.small.0-1733093337718 {}] cleaner.HFileCleaner(306): Exit Thread[master/14efca635be3:0:becomeActiveMaster-HFileCleaner.small.0-1733093337718,5,FailOnTimeoutGroup] 2024-12-01T22:49:00,126 INFO [M:0;14efca635be3:37735 {}] hbase.ChoreService(370): Chore service for: master/14efca635be3:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-01T22:49:00,126 INFO [M:0;14efca635be3:37735 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T22:49:00,126 DEBUG [M:0;14efca635be3:37735 {}] master.HMaster(1795): Stopping service threads 2024-12-01T22:49:00,126 INFO [M:0;14efca635be3:37735 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-01T22:49:00,126 INFO [M:0;14efca635be3:37735 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-01T22:49:00,126 INFO [M:0;14efca635be3:37735 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-01T22:49:00,127 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-01T22:49:00,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-01T22:49:00,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T22:49:00,136 DEBUG [M:0;14efca635be3:37735 {}] zookeeper.ZKUtil(347): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-01T22:49:00,136 WARN [M:0;14efca635be3:37735 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-01T22:49:00,136 INFO [M:0;14efca635be3:37735 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/.lastflushedseqids 2024-12-01T22:49:00,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741843_1019 (size=127) 2024-12-01T22:49:00,152 INFO [M:0;14efca635be3:37735 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-01T22:49:00,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41185 is added to blk_1073741843_1019 (size=127) 2024-12-01T22:49:00,152 INFO [M:0;14efca635be3:37735 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-01T22:49:00,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42687 is added to blk_1073741843_1019 (size=127) 2024-12-01T22:49:00,152 DEBUG 
[M:0;14efca635be3:37735 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-01T22:49:00,152 INFO [M:0;14efca635be3:37735 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T22:49:00,152 DEBUG [M:0;14efca635be3:37735 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T22:49:00,152 DEBUG [M:0;14efca635be3:37735 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-01T22:49:00,152 DEBUG [M:0;14efca635be3:37735 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T22:49:00,152 INFO [M:0;14efca635be3:37735 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-12-01T22:49:00,168 DEBUG [M:0;14efca635be3:37735 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9a0349d0e6464e7c8114f13d00479f64 is 82, key is hbase:meta,,1/info:regioninfo/1733093338464/Put/seqid=0 2024-12-01T22:49:00,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741844_1020 (size=5672) 2024-12-01T22:49:00,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42687 is added to blk_1073741844_1020 (size=5672) 2024-12-01T22:49:00,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41185 is added to blk_1073741844_1020 (size=5672) 2024-12-01T22:49:00,178 INFO [M:0;14efca635be3:37735 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9a0349d0e6464e7c8114f13d00479f64 2024-12-01T22:49:00,201 DEBUG [M:0;14efca635be3:37735 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c3ea015cffe04ca3bc270a75b5127005 is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733093339038/Put/seqid=0 2024-12-01T22:49:00,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42687 is added to blk_1073741845_1021 (size=6440) 2024-12-01T22:49:00,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741845_1021 (size=6440) 2024-12-01T22:49:00,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41185 is added to blk_1073741845_1021 (size=6440) 2024-12-01T22:49:00,212 INFO [M:0;14efca635be3:37735 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.16 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c3ea015cffe04ca3bc270a75b5127005 2024-12-01T22:49:00,215 INFO 
[RS:1;14efca635be3:39869 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-01T22:49:00,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39869-0x1019315a1ac0002, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T22:49:00,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39869-0x1019315a1ac0002, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T22:49:00,215 INFO [RS:1;14efca635be3:39869 {}] regionserver.HRegionServer(1031): Exiting; stopping=14efca635be3,39869,1733093336813; zookeeper connection closed. 2024-12-01T22:49:00,215 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2914d140 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2914d140 2024-12-01T22:49:00,215 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-01T22:49:00,231 DEBUG [M:0;14efca635be3:37735 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8a6123e66105459e9f26da4619f7de2b is 69, key is 14efca635be3,38085,1733093336858/rs:state/1733093337829/Put/seqid=0 2024-12-01T22:49:00,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41185 is added to blk_1073741846_1022 (size=5294) 2024-12-01T22:49:00,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42687 is added to blk_1073741846_1022 (size=5294) 2024-12-01T22:49:00,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741846_1022 (size=5294) 2024-12-01T22:49:00,250 INFO [M:0;14efca635be3:37735 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8a6123e66105459e9f26da4619f7de2b 2024-12-01T22:49:00,258 DEBUG [M:0;14efca635be3:37735 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9a0349d0e6464e7c8114f13d00479f64 as hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9a0349d0e6464e7c8114f13d00479f64 2024-12-01T22:49:00,265 INFO [M:0;14efca635be3:37735 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9a0349d0e6464e7c8114f13d00479f64, entries=8, sequenceid=72, filesize=5.5 K 2024-12-01T22:49:00,266 DEBUG [M:0;14efca635be3:37735 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c3ea015cffe04ca3bc270a75b5127005 as 
hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c3ea015cffe04ca3bc270a75b5127005 2024-12-01T22:49:00,271 INFO [M:0;14efca635be3:37735 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c3ea015cffe04ca3bc270a75b5127005, entries=8, sequenceid=72, filesize=6.3 K 2024-12-01T22:49:00,273 DEBUG [M:0;14efca635be3:37735 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8a6123e66105459e9f26da4619f7de2b as hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8a6123e66105459e9f26da4619f7de2b 2024-12-01T22:49:00,279 INFO [M:0;14efca635be3:37735 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44601/user/jenkins/test-data/74e3fb5d-5b81-d79f-8ecc-3f27e7b8f599/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8a6123e66105459e9f26da4619f7de2b, entries=3, sequenceid=72, filesize=5.2 K 2024-12-01T22:49:00,281 INFO [M:0;14efca635be3:37735 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=72, compaction requested=false 2024-12-01T22:49:00,284 INFO [M:0;14efca635be3:37735 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T22:49:00,284 DEBUG [M:0;14efca635be3:37735 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733093340152Disabling compacts and flushes for region at 1733093340152Disabling writes for close at 1733093340152Obtaining lock to block concurrent updates at 1733093340152Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733093340152Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27483, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1733093340153 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733093340153Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733093340154 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733093340167 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733093340167Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733093340185 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733093340201 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733093340201Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733093340218 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733093340230 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733093340231 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@58bc6748: reopening flushed file at 1733093340256 (+25 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@54ef7545: reopening flushed file at 1733093340265 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@26206a0e: reopening flushed file at 1733093340272 (+7 ms)Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=72, compaction requested=false at 1733093340281 (+9 ms)Writing region close event to WAL at 1733093340284 (+3 ms)Closed at 1733093340284 2024-12-01T22:49:00,284 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T22:49:00,285 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T22:49:00,285 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T22:49:00,285 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T22:49:00,285 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T22:49:00,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741830_1006 (size=32686) 2024-12-01T22:49:00,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42687 is added to blk_1073741830_1006 (size=32686) 2024-12-01T22:49:00,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41185 is added to blk_1073741830_1006 (size=32686) 2024-12-01T22:49:00,289 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-01T22:49:00,289 INFO [M:0;14efca635be3:37735 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-01T22:49:00,289 INFO  [M:0;14efca635be3:37735 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37735
2024-12-01T22:49:00,289 INFO  [M:0;14efca635be3:37735 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-01T22:49:00,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-01T22:49:00,393 INFO  [M:0;14efca635be3:37735 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-01T22:49:00,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37735-0x1019315a1ac0000, quorum=127.0.0.1:55536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-01T22:49:00,396 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1183a3bb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-01T22:49:00,396 INFO  [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6ee4ec12{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-01T22:49:00,396 INFO  [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-01T22:49:00,396 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@73f6422f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-01T22:49:00,397 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@38da8210{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/hadoop.log.dir/,STOPPED}
2024-12-01T22:49:00,397 WARN  [BP-1604229375-172.17.0.2-1733093333546 heartbeating to localhost/127.0.0.1:44601 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-01T22:49:00,398 WARN  [BP-1604229375-172.17.0.2-1733093333546 heartbeating to localhost/127.0.0.1:44601 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1604229375-172.17.0.2-1733093333546 (Datanode Uuid d5e32e25-5c39-4fda-8105-ef5d97ca22cb) service to localhost/127.0.0.1:44601
2024-12-01T22:49:00,398 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-01T22:49:00,398 WARN  [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-01T22:49:00,398 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/cluster_2b2c1e36-2c86-8310-2570-f88700f7ce62/data/data5/current/BP-1604229375-172.17.0.2-1733093333546 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T22:49:00,398 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/cluster_2b2c1e36-2c86-8310-2570-f88700f7ce62/data/data6/current/BP-1604229375-172.17.0.2-1733093333546 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T22:49:00,399 WARN  [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-01T22:49:00,405 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4b340784{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-01T22:49:00,405 INFO  [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1872922a{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-01T22:49:00,405 INFO  [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-01T22:49:00,405 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c597470{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-01T22:49:00,405 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@bb1336{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/hadoop.log.dir/,STOPPED}
2024-12-01T22:49:00,407 WARN  [BP-1604229375-172.17.0.2-1733093333546 heartbeating to localhost/127.0.0.1:44601 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-01T22:49:00,407 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-01T22:49:00,407 WARN  [BP-1604229375-172.17.0.2-1733093333546 heartbeating to localhost/127.0.0.1:44601 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1604229375-172.17.0.2-1733093333546 (Datanode Uuid 1c59fa26-629c-46ae-b078-d80f2b310286) service to localhost/127.0.0.1:44601
2024-12-01T22:49:00,407 WARN  [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-01T22:49:00,407 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/cluster_2b2c1e36-2c86-8310-2570-f88700f7ce62/data/data3/current/BP-1604229375-172.17.0.2-1733093333546 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T22:49:00,407 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/cluster_2b2c1e36-2c86-8310-2570-f88700f7ce62/data/data4/current/BP-1604229375-172.17.0.2-1733093333546 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T22:49:00,408 WARN  [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-01T22:49:00,413 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@582dea15{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-01T22:49:00,413 INFO  [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5f6d3ff7{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-01T22:49:00,413 INFO  [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-01T22:49:00,413 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@137179d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-01T22:49:00,414 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61a92fea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/hadoop.log.dir/,STOPPED}
2024-12-01T22:49:00,415 WARN  [BP-1604229375-172.17.0.2-1733093333546 heartbeating to localhost/127.0.0.1:44601 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-01T22:49:00,415 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-01T22:49:00,415 WARN  [BP-1604229375-172.17.0.2-1733093333546 heartbeating to localhost/127.0.0.1:44601 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1604229375-172.17.0.2-1733093333546 (Datanode Uuid 6c5f984b-6f03-4cf8-8c5c-7929410ba23f) service to localhost/127.0.0.1:44601
2024-12-01T22:49:00,415 WARN  [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-01T22:49:00,415 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/cluster_2b2c1e36-2c86-8310-2570-f88700f7ce62/data/data1/current/BP-1604229375-172.17.0.2-1733093333546 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T22:49:00,415 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/cluster_2b2c1e36-2c86-8310-2570-f88700f7ce62/data/data2/current/BP-1604229375-172.17.0.2-1733093333546 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T22:49:00,416 WARN  [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-01T22:49:00,422 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7f04037c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-01T22:49:00,422 INFO  [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11bed883{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-01T22:49:00,422 INFO  [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-01T22:49:00,423 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18f854cf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-01T22:49:00,423 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56aa9d3b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ff03ba9-b0d8-a0d3-6c3f-e1be4d455864/hadoop.log.dir/,STOPPED}
2024-12-01T22:49:00,431 INFO  [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-01T22:49:00,459 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-01T22:49:00,466 INFO  [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=145 (was 86) - Thread LEAK? -, OpenFileDescriptor=516 (was 439) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=299 (was 308), ProcessCount=11 (was 11), AvailableMemoryMB=9250 (was 9455)