2024-12-05 00:51:37,005 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-05 00:51:37,026 main DEBUG Took 0.017299 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-05 00:51:37,026 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-05 00:51:37,027 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-05 00:51:37,028 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-05 00:51:37,029 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:51:37,043 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-05 00:51:37,067 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:51:37,069 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:51:37,069 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:51:37,070 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:51:37,071 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:51:37,071 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:51:37,072 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:51:37,072 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:51:37,073 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:51:37,073 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:51:37,074 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:51:37,074 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:51:37,074 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:51:37,074 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-05 00:51:37,075 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:51:37,075 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:51:37,075 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:51:37,076 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:51:37,076 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:51:37,076 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:51:37,077 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:51:37,077 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:51:37,077 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:51:37,077 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:51:37,078 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:51:37,078 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-05 00:51:37,079 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:51:37,080 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-05 00:51:37,082 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-05 00:51:37,082 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-05 00:51:37,084 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-05 00:51:37,084 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-05 00:51:37,095 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-05 00:51:37,097 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-05 00:51:37,099 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-05 00:51:37,099 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-05 00:51:37,100 main DEBUG createAppenders(={Console}) 2024-12-05 00:51:37,100 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-12-05 00:51:37,101 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-05 00:51:37,101 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-12-05 00:51:37,101 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-05 00:51:37,102 main DEBUG OutputStream closed 2024-12-05 00:51:37,102 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-05 00:51:37,102 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-05 00:51:37,102 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-12-05 00:51:37,195 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-05 00:51:37,197 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-05 00:51:37,199 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-05 00:51:37,201 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-05 00:51:37,201 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-05 00:51:37,202 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-05 00:51:37,202 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-05 00:51:37,203 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-05 00:51:37,204 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-05 00:51:37,204 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-05 00:51:37,205 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-05 00:51:37,205 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-05 00:51:37,206 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-05 00:51:37,206 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-05 00:51:37,206 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-05 00:51:37,207 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-05 00:51:37,207 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-05 00:51:37,208 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-05 00:51:37,211 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-05 00:51:37,211 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-12-05 00:51:37,212 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-05 00:51:37,213 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-12-05T00:51:37,232 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-12-05 00:51:37,236 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-05 00:51:37,236 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-05T00:51:37,550 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29 2024-12-05T00:51:37,581 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/cluster_7a71daaa-e83f-6826-a5c3-b12584b293e3, deleteOnExit=true 2024-12-05T00:51:37,582 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/test.cache.data in system properties and HBase conf 2024-12-05T00:51:37,583 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/hadoop.tmp.dir in system properties and HBase conf 2024-12-05T00:51:37,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/hadoop.log.dir in system properties and HBase conf 2024-12-05T00:51:37,585 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-05T00:51:37,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-05T00:51:37,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-05T00:51:37,700 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-05T00:51:37,812 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-05T00:51:37,817 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-05T00:51:37,818 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-05T00:51:37,818 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-05T00:51:37,819 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T00:51:37,819 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-05T00:51:37,820 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-05T00:51:37,821 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T00:51:37,821 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T00:51:37,822 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-05T00:51:37,822 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/nfs.dump.dir in system properties and HBase conf 2024-12-05T00:51:37,823 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/java.io.tmpdir in system properties and HBase conf 2024-12-05T00:51:37,823 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T00:51:37,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-05T00:51:37,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-05T00:51:38,787 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-05T00:51:38,857 INFO [Time-limited test {}] log.Log(170): Logging initialized @2640ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-05T00:51:38,918 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:51:38,970 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:51:38,989 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:51:38,990 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:51:38,991 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-05T00:51:39,001 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:51:39,004 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e18bd18{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:51:39,004 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2faf2775{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:51:39,162 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e22261{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/java.io.tmpdir/jetty-localhost-45585-hadoop-hdfs-3_4_1-tests_jar-_-any-18187199317462687060/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-05T00:51:39,168 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3599471c{HTTP/1.1, (http/1.1)}{localhost:45585} 2024-12-05T00:51:39,169 INFO [Time-limited test {}] server.Server(415): Started @2953ms 2024-12-05T00:51:39,709 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:51:39,716 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:51:39,717 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:51:39,717 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:51:39,717 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T00:51:39,719 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7728820b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:51:39,720 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a906869{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:51:39,815 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6bf2c732{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/java.io.tmpdir/jetty-localhost-45223-hadoop-hdfs-3_4_1-tests_jar-_-any-18239991096218626972/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:51:39,816 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@1182e874{HTTP/1.1, (http/1.1)}{localhost:45223} 2024-12-05T00:51:39,816 INFO [Time-limited test {}] server.Server(415): Started @3600ms 2024-12-05T00:51:39,861 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T00:51:39,961 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:51:39,966 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:51:39,967 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:51:39,967 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:51:39,968 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-05T00:51:39,969 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@23e84c60{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:51:39,969 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28ffdd72{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:51:40,091 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@41033a80{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/java.io.tmpdir/jetty-localhost-39239-hadoop-hdfs-3_4_1-tests_jar-_-any-17901443667188316436/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:51:40,092 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@14721f03{HTTP/1.1, (http/1.1)}{localhost:39239} 2024-12-05T00:51:40,092 INFO [Time-limited test {}] server.Server(415): Started @3876ms 2024-12-05T00:51:40,095 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T00:51:40,127 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:51:40,131 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:51:40,132 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:51:40,132 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:51:40,132 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-05T00:51:40,133 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@435daa1b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:51:40,134 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@41dce2a2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:51:40,227 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@67fa62aa{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/java.io.tmpdir/jetty-localhost-39933-hadoop-hdfs-3_4_1-tests_jar-_-any-7447592874387321660/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:51:40,228 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3e9f3a79{HTTP/1.1, (http/1.1)}{localhost:39933} 2024-12-05T00:51:40,228 INFO [Time-limited test {}] server.Server(415): Started @4012ms 2024-12-05T00:51:40,230 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-05T00:51:41,592 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/cluster_7a71daaa-e83f-6826-a5c3-b12584b293e3/data/data3/current/BP-467069894-172.17.0.2-1733359898308/current, will proceed with Du for space computation calculation, 2024-12-05T00:51:41,592 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/cluster_7a71daaa-e83f-6826-a5c3-b12584b293e3/data/data2/current/BP-467069894-172.17.0.2-1733359898308/current, will proceed with Du for space computation calculation, 2024-12-05T00:51:41,592 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/cluster_7a71daaa-e83f-6826-a5c3-b12584b293e3/data/data4/current/BP-467069894-172.17.0.2-1733359898308/current, will proceed with Du for space computation calculation, 2024-12-05T00:51:41,592 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/cluster_7a71daaa-e83f-6826-a5c3-b12584b293e3/data/data1/current/BP-467069894-172.17.0.2-1733359898308/current, will proceed with Du for space computation calculation, 2024-12-05T00:51:41,619 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-05T00:51:41,619 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-05T00:51:41,651 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/cluster_7a71daaa-e83f-6826-a5c3-b12584b293e3/data/data5/current/BP-467069894-172.17.0.2-1733359898308/current, will proceed with Du for space computation calculation, 2024-12-05T00:51:41,651 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/cluster_7a71daaa-e83f-6826-a5c3-b12584b293e3/data/data6/current/BP-467069894-172.17.0.2-1733359898308/current, will proceed with Du for space computation calculation, 2024-12-05T00:51:41,672 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-05T00:51:41,682 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x29da856cf90a64b4 with lease ID 0xafbdb4aaae71eec6: Processing first storage report for DS-4ba6f0fd-eeb2-40f6-8e3a-68442b0afe6c from datanode DatanodeRegistration(127.0.0.1:39373, datanodeUuid=87464b85-7c71-4e59-b28b-53e7b0e0c330, infoPort=40901, infoSecurePort=0, ipcPort=46255, storageInfo=lv=-57;cid=testClusterID;nsid=1956499939;c=1733359898308) 2024-12-05T00:51:41,683 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x29da856cf90a64b4 with lease ID 0xafbdb4aaae71eec6: from storage DS-4ba6f0fd-eeb2-40f6-8e3a-68442b0afe6c node DatanodeRegistration(127.0.0.1:39373, datanodeUuid=87464b85-7c71-4e59-b28b-53e7b0e0c330, infoPort=40901, infoSecurePort=0, ipcPort=46255, storageInfo=lv=-57;cid=testClusterID;nsid=1956499939;c=1733359898308), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-05T00:51:41,684 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7e429462e5d2ac5d with lease ID 0xafbdb4aaae71eec5: Processing first storage report for DS-0e58e239-eb5c-4d6c-9391-0d1b8f75b3ee from datanode DatanodeRegistration(127.0.0.1:35037, datanodeUuid=a3657a6b-75ae-488c-baff-06e397515557, infoPort=39857, infoSecurePort=0, ipcPort=41395, storageInfo=lv=-57;cid=testClusterID;nsid=1956499939;c=1733359898308) 2024-12-05T00:51:41,684 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7e429462e5d2ac5d with lease ID 0xafbdb4aaae71eec5: from storage DS-0e58e239-eb5c-4d6c-9391-0d1b8f75b3ee node DatanodeRegistration(127.0.0.1:35037, datanodeUuid=a3657a6b-75ae-488c-baff-06e397515557, infoPort=39857, infoSecurePort=0, ipcPort=41395, storageInfo=lv=-57;cid=testClusterID;nsid=1956499939;c=1733359898308), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:51:41,684 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3154d608f6f92897 with lease ID 0xafbdb4aaae71eec4: Processing first storage report for DS-3f2f41a6-0d6a-4780-8204-5cfed35c52ab from datanode DatanodeRegistration(127.0.0.1:35255, datanodeUuid=b12e04d0-6876-4bff-ad8b-ddc923f4168b, infoPort=38625, infoSecurePort=0, ipcPort=34351, storageInfo=lv=-57;cid=testClusterID;nsid=1956499939;c=1733359898308) 2024-12-05T00:51:41,684 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3154d608f6f92897 with lease ID 0xafbdb4aaae71eec4: from storage DS-3f2f41a6-0d6a-4780-8204-5cfed35c52ab node DatanodeRegistration(127.0.0.1:35255, datanodeUuid=b12e04d0-6876-4bff-ad8b-ddc923f4168b, infoPort=38625, infoSecurePort=0, ipcPort=34351, storageInfo=lv=-57;cid=testClusterID;nsid=1956499939;c=1733359898308), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:51:41,684 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x29da856cf90a64b4 with lease ID 0xafbdb4aaae71eec6: Processing first storage report for DS-24de3abd-66f0-479d-bd2b-9ee04f7cdcc3 from datanode DatanodeRegistration(127.0.0.1:39373, datanodeUuid=87464b85-7c71-4e59-b28b-53e7b0e0c330, infoPort=40901, infoSecurePort=0, ipcPort=46255, storageInfo=lv=-57;cid=testClusterID;nsid=1956499939;c=1733359898308) 2024-12-05T00:51:41,685 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x29da856cf90a64b4 with lease ID 0xafbdb4aaae71eec6: from storage DS-24de3abd-66f0-479d-bd2b-9ee04f7cdcc3 node DatanodeRegistration(127.0.0.1:39373, datanodeUuid=87464b85-7c71-4e59-b28b-53e7b0e0c330, infoPort=40901, infoSecurePort=0, ipcPort=46255, storageInfo=lv=-57;cid=testClusterID;nsid=1956499939;c=1733359898308), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:51:41,685 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7e429462e5d2ac5d with lease ID 0xafbdb4aaae71eec5: Processing first storage report for DS-f2f9f978-80f7-4666-abb7-4581e88551ba from datanode DatanodeRegistration(127.0.0.1:35037, datanodeUuid=a3657a6b-75ae-488c-baff-06e397515557, infoPort=39857, infoSecurePort=0, ipcPort=41395, storageInfo=lv=-57;cid=testClusterID;nsid=1956499939;c=1733359898308) 2024-12-05T00:51:41,685 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7e429462e5d2ac5d with lease ID 0xafbdb4aaae71eec5: from storage DS-f2f9f978-80f7-4666-abb7-4581e88551ba node DatanodeRegistration(127.0.0.1:35037, datanodeUuid=a3657a6b-75ae-488c-baff-06e397515557, infoPort=39857, infoSecurePort=0, ipcPort=41395, storageInfo=lv=-57;cid=testClusterID;nsid=1956499939;c=1733359898308), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:51:41,685 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3154d608f6f92897 with lease ID 0xafbdb4aaae71eec4: Processing first storage report for DS-a0ecfa1c-6fce-4b49-8fbb-c99472d9893d from datanode DatanodeRegistration(127.0.0.1:35255, datanodeUuid=b12e04d0-6876-4bff-ad8b-ddc923f4168b, infoPort=38625, infoSecurePort=0, ipcPort=34351, storageInfo=lv=-57;cid=testClusterID;nsid=1956499939;c=1733359898308) 2024-12-05T00:51:41,685 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3154d608f6f92897 with lease ID 0xafbdb4aaae71eec4: from storage DS-a0ecfa1c-6fce-4b49-8fbb-c99472d9893d node DatanodeRegistration(127.0.0.1:35255, datanodeUuid=b12e04d0-6876-4bff-ad8b-ddc923f4168b, infoPort=38625, infoSecurePort=0, ipcPort=34351, storageInfo=lv=-57;cid=testClusterID;nsid=1956499939;c=1733359898308), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:51:41,729 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29 2024-12-05T00:51:41,798 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable 2024-12-05T00:51:41,852 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=157, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=668, ProcessCount=11, AvailableMemoryMB=9972 2024-12-05T00:51:41,854 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-05T00:51:41,864 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-12-05T00:51:41,967 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/cluster_7a71daaa-e83f-6826-a5c3-b12584b293e3/zookeeper_0, clientPort=55679, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/cluster_7a71daaa-e83f-6826-a5c3-b12584b293e3/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/cluster_7a71daaa-e83f-6826-a5c3-b12584b293e3/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-05T00:51:41,995 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55679 2024-12-05T00:51:42,010 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:51:42,014 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:51:42,115 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T00:51:42,116 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T00:51:42,163 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-976733774_22 at /127.0.0.1:33140 [Receiving block BP-467069894-172.17.0.2-1733359898308:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:39373:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33140 dst: /127.0.0.1:39373 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:51:42,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_-9223372036854775792_1002 (size=7) 2024-12-05T00:51:42,582 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T00:51:42,594 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693 with version=8 2024-12-05T00:51:42,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/hbase-staging 2024-12-05T00:51:42,714 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-05T00:51:42,956 INFO [Time-limited test {}] client.ConnectionUtils(128): master/fea72ea5c4b6:0 server-side Connection retries=45 2024-12-05T00:51:42,965 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:51:42,965 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T00:51:42,969 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T00:51:42,969 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:51:42,969 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T00:51:43,087 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-05T00:51:43,149 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class 
org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-05T00:51:43,156 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-05T00:51:43,159 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T00:51:43,179 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 77635 (auto-detected) 2024-12-05T00:51:43,180 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-05T00:51:43,197 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39757 2024-12-05T00:51:43,220 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39757 connecting to ZooKeeper ensemble=127.0.0.1:55679 2024-12-05T00:51:43,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:397570x0, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T00:51:43,420 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39757-0x101a2f918a20000 connected 2024-12-05T00:51:43,521 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:51:43,525 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:51:43,535 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:51:43,540 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693, hbase.cluster.distributed=false 2024-12-05T00:51:43,566 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T00:51:43,572 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39757 2024-12-05T00:51:43,572 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39757 2024-12-05T00:51:43,572 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39757 2024-12-05T00:51:43,573 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39757 2024-12-05T00:51:43,573 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39757 2024-12-05T00:51:43,670 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/fea72ea5c4b6:0 server-side Connection retries=45 2024-12-05T00:51:43,672 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:51:43,672 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T00:51:43,672 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T00:51:43,673 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:51:43,673 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T00:51:43,675 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T00:51:43,677 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T00:51:43,678 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42635 2024-12-05T00:51:43,680 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42635 connecting to ZooKeeper ensemble=127.0.0.1:55679 2024-12-05T00:51:43,681 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:51:43,684 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:51:43,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:426350x0, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T00:51:43,699 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:426350x0, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:51:43,699 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42635-0x101a2f918a20001 connected 2024-12-05T00:51:43,702 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-05T00:51:43,709 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T00:51:43,712 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42635-0x101a2f918a20001, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T00:51:43,717 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42635-0x101a2f918a20001, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T00:51:43,718 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42635 2024-12-05T00:51:43,718 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42635 
2024-12-05T00:51:43,718 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42635 2024-12-05T00:51:43,720 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42635 2024-12-05T00:51:43,720 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42635 2024-12-05T00:51:43,736 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/fea72ea5c4b6:0 server-side Connection retries=45 2024-12-05T00:51:43,736 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:51:43,736 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T00:51:43,737 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T00:51:43,737 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:51:43,737 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T00:51:43,738 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T00:51:43,738 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T00:51:43,739 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39803 2024-12-05T00:51:43,741 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39803 connecting to ZooKeeper ensemble=127.0.0.1:55679 2024-12-05T00:51:43,743 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:51:43,748 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:51:43,783 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:398030x0, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T00:51:43,784 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39803-0x101a2f918a20002 connected 2024-12-05T00:51:43,784 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39803-0x101a2f918a20002, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:51:43,784 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, 
blockSize=64 KB 2024-12-05T00:51:43,785 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T00:51:43,786 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39803-0x101a2f918a20002, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T00:51:43,788 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39803-0x101a2f918a20002, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T00:51:43,789 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39803 2024-12-05T00:51:43,789 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39803 2024-12-05T00:51:43,790 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39803 2024-12-05T00:51:43,790 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39803 2024-12-05T00:51:43,790 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39803 2024-12-05T00:51:43,805 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/fea72ea5c4b6:0 server-side Connection retries=45 2024-12-05T00:51:43,806 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:51:43,806 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T00:51:43,806 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T00:51:43,806 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:51:43,806 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T00:51:43,807 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T00:51:43,807 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T00:51:43,808 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41065 2024-12-05T00:51:43,809 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41065 connecting to ZooKeeper ensemble=127.0.0.1:55679 2024-12-05T00:51:43,810 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:51:43,813 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:51:43,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:410650x0, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T00:51:43,825 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:410650x0, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:51:43,825 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41065-0x101a2f918a20003 connected 2024-12-05T00:51:43,826 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-05T00:51:43,827 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T00:51:43,828 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41065-0x101a2f918a20003, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T00:51:43,830 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41065-0x101a2f918a20003, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T00:51:43,831 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41065 2024-12-05T00:51:43,831 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41065 2024-12-05T00:51:43,831 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41065 2024-12-05T00:51:43,832 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41065 2024-12-05T00:51:43,833 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41065 2024-12-05T00:51:43,853 DEBUG [M:0;fea72ea5c4b6:39757 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;fea72ea5c4b6:39757 2024-12-05T00:51:43,854 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/fea72ea5c4b6,39757,1733359902807 2024-12-05T00:51:43,867 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39803-0x101a2f918a20002, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:51:43,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x101a2f918a20003, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:51:43,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42635-0x101a2f918a20001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
2024-12-05T00:51:43,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:51:43,869 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/fea72ea5c4b6,39757,1733359902807 2024-12-05T00:51:43,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42635-0x101a2f918a20001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T00:51:43,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x101a2f918a20003, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T00:51:43,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:43,894 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39803-0x101a2f918a20002, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T00:51:43,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x101a2f918a20003, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:43,894 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39803-0x101a2f918a20002, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:43,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42635-0x101a2f918a20001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:43,895 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-05T00:51:43,896 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/fea72ea5c4b6,39757,1733359902807 from backup master directory 2024-12-05T00:51:43,915 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39803-0x101a2f918a20002, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:51:43,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x101a2f918a20003, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:51:43,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/fea72ea5c4b6,39757,1733359902807 
2024-12-05T00:51:43,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-05T00:51:43,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42635-0x101a2f918a20001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-05T00:51:43,916 WARN [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-05T00:51:43,916 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=fea72ea5c4b6,39757,1733359902807
2024-12-05T00:51:43,918 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-12-05T00:51:43,920 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-12-05T00:51:43,975 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/hbase.id] with ID: 95ac2848-fd5d-471e-b44c-d69239aaba32
2024-12-05T00:51:43,975 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/.tmp/hbase.id
2024-12-05T00:51:43,982 WARN [master/fea72ea5c4b6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-05T00:51:43,982 WARN [master/fea72ea5c4b6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-05T00:51:43,985 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-976733774_22 at /127.0.0.1:33162 [Receiving block BP-467069894-172.17.0.2-1733359898308:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:39373:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33162 dst: /127.0.0.1:39373
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-05T00:51:43,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_-9223372036854775776_1004 (size=42)
2024-12-05T00:51:43,991 WARN [master/fea72ea5c4b6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-05T00:51:43,992 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/.tmp/hbase.id]:[hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/hbase.id]
2024-12-05T00:51:44,035 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T00:51:44,040 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-12-05T00:51:44,057 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 15ms.
2024-12-05T00:51:44,066 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42635-0x101a2f918a20001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-05T00:51:44,066 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-05T00:51:44,066 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x101a2f918a20003, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-05T00:51:44,066 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39803-0x101a2f918a20002, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-05T00:51:44,078 WARN [master/fea72ea5c4b6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-05T00:51:44,079 WARN [master/fea72ea5c4b6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-05T00:51:44,081 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-976733774_22 at /127.0.0.1:33182 [Receiving block BP-467069894-172.17.0.2-1733359898308:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:39373:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33182 dst: /127.0.0.1:39373
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-05T00:51:44,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_-9223372036854775760_1006 (size=196)
2024-12-05T00:51:44,087 WARN [master/fea72ea5c4b6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-05T00:51:44,100 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T00:51:44,102 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-05T00:51:44,109 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T00:51:44,140 WARN [master/fea72ea5c4b6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T00:51:44,140 WARN [master/fea72ea5c4b6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T00:51:44,143 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-976733774_22 at /127.0.0.1:33208 [Receiving block BP-467069894-172.17.0.2-1733359898308:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:39373:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33208 dst: /127.0.0.1:39373 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:51:44,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-05T00:51:44,149 WARN [master/fea72ea5c4b6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T00:51:44,167 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/MasterData/data/master/store 2024-12-05T00:51:44,184 WARN [master/fea72ea5c4b6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T00:51:44,184 WARN [master/fea72ea5c4b6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T00:51:44,187 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-976733774_22 at /127.0.0.1:53316 [Receiving block BP-467069894-172.17.0.2-1733359898308:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35037:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53316 dst: /127.0.0.1:35037 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:51:44,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35037 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-05T00:51:44,192 WARN [master/fea72ea5c4b6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T00:51:44,196 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-05T00:51:44,198 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:51:44,199 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T00:51:44,200 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:51:44,200 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:51:44,201 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-05T00:51:44,201 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:51:44,201 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:51:44,202 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733359904199Disabling compacts and flushes for region at 1733359904199Disabling writes for close at 1733359904201 (+2 ms)Writing region close event to WAL at 1733359904201Closed at 1733359904201 2024-12-05T00:51:44,204 WARN [master/fea72ea5c4b6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/MasterData/data/master/store/.initializing 2024-12-05T00:51:44,204 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/MasterData/WALs/fea72ea5c4b6,39757,1733359902807 2024-12-05T00:51:44,211 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T00:51:44,224 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fea72ea5c4b6%2C39757%2C1733359902807, suffix=, logDir=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/MasterData/WALs/fea72ea5c4b6,39757,1733359902807, archiveDir=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/MasterData/oldWALs, maxLogs=10 2024-12-05T00:51:44,260 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/MasterData/WALs/fea72ea5c4b6,39757,1733359902807/fea72ea5c4b6%2C39757%2C1733359902807.1733359904230, exclude list is [], retry=0 2024-12-05T00:51:44,279 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:51:44,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35037,DS-0e58e239-eb5c-4d6c-9391-0d1b8f75b3ee,DISK] 2024-12-05T00:51:44,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39373,DS-4ba6f0fd-eeb2-40f6-8e3a-68442b0afe6c,DISK] 2024-12-05T00:51:44,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35255,DS-3f2f41a6-0d6a-4780-8204-5cfed35c52ab,DISK] 2024-12-05T00:51:44,284 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-05T00:51:44,320 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/MasterData/WALs/fea72ea5c4b6,39757,1733359902807/fea72ea5c4b6%2C39757%2C1733359902807.1733359904230 2024-12-05T00:51:44,320 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39857:39857),(127.0.0.1/127.0.0.1:40901:40901),(127.0.0.1/127.0.0.1:38625:38625)] 2024-12-05T00:51:44,321 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-05T00:51:44,321 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:51:44,324 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:51:44,325 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:51:44,359 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:51:44,382 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-05T00:51:44,385 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:44,388 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:51:44,388 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:51:44,392 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-05T00:51:44,392 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:44,393 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:51:44,393 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:51:44,395 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-05T00:51:44,395 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:44,396 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:51:44,397 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:51:44,399 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-05T00:51:44,399 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:44,400 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:51:44,400 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:51:44,403 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:51:44,404 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:51:44,409 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:51:44,409 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:51:44,412 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T00:51:44,416 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:51:44,422 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T00:51:44,424 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62427513, jitterRate=-0.06975756585597992}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T00:51:44,431 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733359904336Initializing all the Stores at 1733359904338 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733359904339 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733359904339Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733359904339Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733359904339Cleaning up temporary data from old regions at 1733359904409 (+70 ms)Region opened successfully at 1733359904431 (+22 ms) 2024-12-05T00:51:44,432 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-05T00:51:44,467 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c84563f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=fea72ea5c4b6/172.17.0.2:0 2024-12-05T00:51:44,498 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-05T00:51:44,511 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-05T00:51:44,511 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-05T00:51:44,514 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-05T00:51:44,516 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-05T00:51:44,521 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-12-05T00:51:44,521 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-05T00:51:44,548 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-05T00:51:44,556 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-05T00:51:44,599 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-05T00:51:44,601 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-05T00:51:44,603 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-05T00:51:44,614 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-05T00:51:44,616 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-05T00:51:44,619 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-05T00:51:44,630 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-05T00:51:44,632 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-05T00:51:44,641 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-05T00:51:44,657 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-05T00:51:44,666 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-05T00:51:44,677 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39803-0x101a2f918a20002, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T00:51:44,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42635-0x101a2f918a20001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T00:51:44,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T00:51:44,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x101a2f918a20003, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T00:51:44,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42635-0x101a2f918a20001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:44,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x101a2f918a20003, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:44,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35037 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-05T00:51:44,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:44,679 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39803-0x101a2f918a20002, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:44,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35255 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-05T00:51:44,681 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=fea72ea5c4b6,39757,1733359902807, sessionid=0x101a2f918a20000, setting cluster-up flag (Was=false) 2024-12-05T00:51:44,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42635-0x101a2f918a20001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:44,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39757-0x101a2f918a20000, 
quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:44,708 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39803-0x101a2f918a20002, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:44,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x101a2f918a20003, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:44,740 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-05T00:51:44,742 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=fea72ea5c4b6,39757,1733359902807 2024-12-05T00:51:44,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:44,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x101a2f918a20003, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:44,767 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39803-0x101a2f918a20002, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:44,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42635-0x101a2f918a20001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:44,799 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-05T00:51:44,801 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=fea72ea5c4b6,39757,1733359902807 2024-12-05T00:51:44,806 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-05T00:51:44,836 INFO [RS:1;fea72ea5c4b6:39803 {}] regionserver.HRegionServer(746): ClusterId : 95ac2848-fd5d-471e-b44c-d69239aaba32 2024-12-05T00:51:44,836 INFO [RS:2;fea72ea5c4b6:41065 {}] regionserver.HRegionServer(746): ClusterId : 95ac2848-fd5d-471e-b44c-d69239aaba32 2024-12-05T00:51:44,836 INFO [RS:0;fea72ea5c4b6:42635 {}] regionserver.HRegionServer(746): ClusterId : 95ac2848-fd5d-471e-b44c-d69239aaba32 2024-12-05T00:51:44,839 DEBUG [RS:1;fea72ea5c4b6:39803 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T00:51:44,839 DEBUG [RS:2;fea72ea5c4b6:41065 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T00:51:44,839 DEBUG 
[RS:0;fea72ea5c4b6:42635 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T00:51:44,864 DEBUG [RS:2;fea72ea5c4b6:41065 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T00:51:44,864 DEBUG [RS:2;fea72ea5c4b6:41065 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T00:51:44,864 DEBUG [RS:0;fea72ea5c4b6:42635 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T00:51:44,864 DEBUG [RS:1;fea72ea5c4b6:39803 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T00:51:44,865 DEBUG [RS:0;fea72ea5c4b6:42635 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T00:51:44,865 DEBUG [RS:1;fea72ea5c4b6:39803 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T00:51:44,871 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-05T00:51:44,879 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-05T00:51:44,885 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
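[Editorial note] The StochasticLoadBalancer line directly above is the balancer echoing its effective tuning knobs (maxSteps, runMaxSteps, stepsPerRegion, maxRunningTime). Purely as an illustrative sketch, and not part of this test run, the same knobs can be set on a client-built configuration; the property names below are the standard stochastic-balancer keys, but treat them and the values as assumptions rather than anything taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    // Illustrative only: mirrors the knobs the balancer reports at startup.
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
    conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
    conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
    conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30_000);
    // A conf built this way would normally be handed to the master / test utility.
    System.out.println(conf.get("hbase.master.balancer.stochastic.maxSteps"));
  }
}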
2024-12-05T00:51:44,889 DEBUG [RS:2;fea72ea5c4b6:41065 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T00:51:44,889 DEBUG [RS:0;fea72ea5c4b6:42635 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T00:51:44,889 DEBUG [RS:1;fea72ea5c4b6:39803 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T00:51:44,889 DEBUG [RS:2;fea72ea5c4b6:41065 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@779024c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=fea72ea5c4b6/172.17.0.2:0 2024-12-05T00:51:44,889 DEBUG [RS:0;fea72ea5c4b6:42635 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38c6ee1c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=fea72ea5c4b6/172.17.0.2:0 2024-12-05T00:51:44,889 DEBUG [RS:1;fea72ea5c4b6:39803 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29ad67b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=fea72ea5c4b6/172.17.0.2:0 2024-12-05T00:51:44,893 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: fea72ea5c4b6,39757,1733359902807 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-05T00:51:44,902 DEBUG [RS:2;fea72ea5c4b6:41065 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;fea72ea5c4b6:41065 2024-12-05T00:51:44,906 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/fea72ea5c4b6:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:51:44,906 INFO [RS:2;fea72ea5c4b6:41065 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T00:51:44,906 INFO [RS:2;fea72ea5c4b6:41065 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T00:51:44,906 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/fea72ea5c4b6:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:51:44,906 DEBUG [RS:2;fea72ea5c4b6:41065 {}] regionserver.HRegionServer(832): About to register with Master. 
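[Editorial note] Stepping back to the ZKWatcher entries near the start of this stretch: the NodeCreated notification for /hbase/running and the NodeChildrenChanged notifications for /hbase are ordinary ZooKeeper watch events. A minimal standalone sketch of the same mechanism with the stock ZooKeeper client follows; the quorum address is a placeholder, not the ephemeral port used by this test.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZkWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch created = new CountDownLatch(1);
    // Placeholder quorum address for illustration.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> {
      // The default watcher receives session events and watches set below.
      if (event.getType() == Watcher.Event.EventType.NodeCreated
          && "/hbase/running".equals(event.getPath())) {
        created.countDown();
      }
    });
    // exists() with watch=true registers a one-shot watch; this is the same
    // mechanism behind the NodeCreated event logged by ZKWatcher above.
    zk.exists("/hbase/running", true);
    created.await();
    zk.close();
  }
}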
2024-12-05T00:51:44,906 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/fea72ea5c4b6:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:51:44,906 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/fea72ea5c4b6:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:51:44,906 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/fea72ea5c4b6:0, corePoolSize=10, maxPoolSize=10 2024-12-05T00:51:44,907 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:44,907 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/fea72ea5c4b6:0, corePoolSize=2, maxPoolSize=2 2024-12-05T00:51:44,907 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:44,909 INFO [RS:2;fea72ea5c4b6:41065 {}] regionserver.HRegionServer(2659): reportForDuty to master=fea72ea5c4b6,39757,1733359902807 with port=41065, startcode=1733359903805 2024-12-05T00:51:44,911 DEBUG [RS:0;fea72ea5c4b6:42635 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;fea72ea5c4b6:42635 2024-12-05T00:51:44,911 INFO [RS:0;fea72ea5c4b6:42635 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T00:51:44,911 INFO [RS:0;fea72ea5c4b6:42635 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T00:51:44,912 DEBUG [RS:0;fea72ea5c4b6:42635 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-05T00:51:44,914 INFO [RS:0;fea72ea5c4b6:42635 {}] regionserver.HRegionServer(2659): reportForDuty to master=fea72ea5c4b6,39757,1733359902807 with port=42635, startcode=1733359903638 2024-12-05T00:51:44,914 DEBUG [RS:1;fea72ea5c4b6:39803 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;fea72ea5c4b6:39803 2024-12-05T00:51:44,914 INFO [RS:1;fea72ea5c4b6:39803 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T00:51:44,914 INFO [RS:1;fea72ea5c4b6:39803 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T00:51:44,914 DEBUG [RS:1;fea72ea5c4b6:39803 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-05T00:51:44,915 INFO [RS:1;fea72ea5c4b6:39803 {}] regionserver.HRegionServer(2659): reportForDuty to master=fea72ea5c4b6,39757,1733359902807 with port=39803, startcode=1733359903735 2024-12-05T00:51:44,918 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:51:44,918 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-05T00:51:44,922 DEBUG [RS:2;fea72ea5c4b6:41065 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T00:51:44,922 DEBUG [RS:1;fea72ea5c4b6:39803 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T00:51:44,922 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733359934922 2024-12-05T00:51:44,923 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-05T00:51:44,924 DEBUG [RS:0;fea72ea5c4b6:42635 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T00:51:44,925 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-05T00:51:44,925 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:44,925 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-05T00:51:44,929 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-05T00:51:44,929 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-05T00:51:44,929 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-05T00:51:44,930 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-05T00:51:44,930 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:44,942 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-05T00:51:44,943 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-05T00:51:44,944 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-05T00:51:44,945 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T00:51:44,946 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T00:51:44,961 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-05T00:51:44,961 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-05T00:51:44,963 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/fea72ea5c4b6:0:becomeActiveMaster-HFileCleaner.large.0-1733359904963,5,FailOnTimeoutGroup] 2024-12-05T00:51:44,964 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/fea72ea5c4b6:0:becomeActiveMaster-HFileCleaner.small.0-1733359904964,5,FailOnTimeoutGroup] 2024-12-05T00:51:44,964 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:44,964 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-05T00:51:44,969 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 
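[Editorial note] The FSTableDescriptors entry at 00:51:44,925 prints the full hbase:meta descriptor being written out. Purely for orientation, a column family with the same attributes (VERSIONS 3, ROWCOL bloom filter, ROW_INDEX_V1 encoding, 8 KB blocks, in-memory) could be assembled with the public client API roughly as below; the table name is an invented example, not meta itself.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static void main(String[] args) {
    // Mirrors the 'info' family attributes printed in the descriptor above.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8 * 1024)
        .setInMemory(true)
        .build();
    // Hypothetical table name purely for illustration.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_table"))
        .setColumnFamily(info)
        .build();
    System.out.println(td);
  }
}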
2024-12-05T00:51:44,970 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-976733774_22 at /127.0.0.1:33230 [Receiving block BP-467069894-172.17.0.2-1733359898308:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:39373:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33230 dst: /127.0.0.1:39373 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:51:44,971 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:44,971 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44699, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T00:51:44,971 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34427, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T00:51:44,972 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46077, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T00:51:44,977 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39757 {}] master.ServerManager(363): Checking decommissioned status of RegionServer fea72ea5c4b6,42635,1733359903638 2024-12-05T00:51:44,979 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39757 {}] master.ServerManager(517): Registering regionserver=fea72ea5c4b6,42635,1733359903638 2024-12-05T00:51:44,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-05T00:51:44,985 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-05T00:51:44,986 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-05T00:51:44,986 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693 2024-12-05T00:51:44,992 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39757 {}] master.ServerManager(363): Checking decommissioned status of RegionServer fea72ea5c4b6,41065,1733359903805 2024-12-05T00:51:44,992 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39757 {}] master.ServerManager(517): Registering regionserver=fea72ea5c4b6,41065,1733359903805 2024-12-05T00:51:44,996 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39757 {}] master.ServerManager(363): Checking decommissioned status of RegionServer fea72ea5c4b6,39803,1733359903735 2024-12-05T00:51:44,996 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39757 {}] master.ServerManager(517): Registering regionserver=fea72ea5c4b6,39803,1733359903735 2024-12-05T00:51:44,996 DEBUG [RS:0;fea72ea5c4b6:42635 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693 2024-12-05T00:51:44,996 DEBUG [RS:2;fea72ea5c4b6:41065 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693 2024-12-05T00:51:44,996 DEBUG [RS:0;fea72ea5c4b6:42635 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40131 2024-12-05T00:51:44,996 DEBUG [RS:2;fea72ea5c4b6:41065 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40131 2024-12-05T00:51:44,997 
DEBUG [RS:0;fea72ea5c4b6:42635 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T00:51:44,997 DEBUG [RS:2;fea72ea5c4b6:41065 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T00:51:45,000 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T00:51:45,000 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T00:51:45,001 DEBUG [RS:1;fea72ea5c4b6:39803 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693 2024-12-05T00:51:45,001 DEBUG [RS:1;fea72ea5c4b6:39803 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40131 2024-12-05T00:51:45,001 DEBUG [RS:1;fea72ea5c4b6:39803 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T00:51:45,006 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-976733774_22 at /127.0.0.1:53354 [Receiving block BP-467069894-172.17.0.2-1733359898308:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:35037:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53354 dst: /127.0.0.1:35037 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:51:45,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35037 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-05T00:51:45,012 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
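[Editorial note] The DFSStripedOutputStream warnings at 00:51:44,945 and 00:51:45,000 are expected on a three-datanode mini cluster: the RS-3-2-1024k policy needs five distinct nodes for its three data plus two parity blocks, so the parity allocations at index 3 and 4 fail and the "high risk of losing data" follow-up appears. The message itself points at 'hdfs ec -verifyClusterSetup'; as a hedged sketch only (placeholder namenode URI and path, not those of this test), the effective policy of a directory can also be read from Java:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcPolicySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder URI; the test above runs its own mini DFS on a random port.
    conf.set("fs.defaultFS", "hdfs://localhost:8020");
    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // A null result means the directory uses the default replicated layout.
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(new Path("/example"));
      System.out.println(policy == null ? "replication (no EC policy)" : policy.getName());
    }
  }
}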
2024-12-05T00:51:45,013 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:51:45,015 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T00:51:45,018 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T00:51:45,018 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:45,019 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:51:45,019 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T00:51:45,057 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T00:51:45,057 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:45,058 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:51:45,058 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T00:51:45,061 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T00:51:45,061 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:45,061 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:51:45,062 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T00:51:45,064 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T00:51:45,064 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:45,065 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:51:45,065 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T00:51:45,066 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/hbase/meta/1588230740 2024-12-05T00:51:45,067 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/hbase/meta/1588230740 2024-12-05T00:51:45,069 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal 
replay for 1588230740 2024-12-05T00:51:45,069 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T00:51:45,070 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T00:51:45,072 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T00:51:45,077 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T00:51:45,078 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60150185, jitterRate=-0.10369239747524261}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T00:51:45,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733359905013Initializing all the Stores at 1733359905015 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733359905015Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733359905015Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733359905015Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733359905015Cleaning up temporary data from old regions at 1733359905069 (+54 ms)Region opened successfully at 1733359905081 (+12 ms) 2024-12-05T00:51:45,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T00:51:45,082 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T00:51:45,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T00:51:45,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T00:51:45,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region 
hbase:meta,,1.1588230740 2024-12-05T00:51:45,084 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T00:51:45,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733359905082Disabling compacts and flushes for region at 1733359905082Disabling writes for close at 1733359905082Writing region close event to WAL at 1733359905083 (+1 ms)Closed at 1733359905084 (+1 ms) 2024-12-05T00:51:45,087 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:51:45,087 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-05T00:51:45,094 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-05T00:51:45,103 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T00:51:45,106 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-05T00:51:45,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T00:51:45,257 WARN [fea72ea5c4b6:39757 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-05T00:51:45,275 DEBUG [RS:0;fea72ea5c4b6:42635 {}] zookeeper.ZKUtil(111): regionserver:42635-0x101a2f918a20001, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/fea72ea5c4b6,42635,1733359903638 2024-12-05T00:51:45,275 DEBUG [RS:2;fea72ea5c4b6:41065 {}] zookeeper.ZKUtil(111): regionserver:41065-0x101a2f918a20003, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/fea72ea5c4b6,41065,1733359903805 2024-12-05T00:51:45,275 WARN [RS:2;fea72ea5c4b6:41065 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T00:51:45,275 WARN [RS:0;fea72ea5c4b6:42635 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-05T00:51:45,275 INFO [RS:0;fea72ea5c4b6:42635 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T00:51:45,275 INFO [RS:2;fea72ea5c4b6:41065 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T00:51:45,276 DEBUG [RS:1;fea72ea5c4b6:39803 {}] zookeeper.ZKUtil(111): regionserver:39803-0x101a2f918a20002, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/fea72ea5c4b6,39803,1733359903735 2024-12-05T00:51:45,276 DEBUG [RS:2;fea72ea5c4b6:41065 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/WALs/fea72ea5c4b6,41065,1733359903805 2024-12-05T00:51:45,276 WARN [RS:1;fea72ea5c4b6:39803 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T00:51:45,276 INFO [RS:1;fea72ea5c4b6:39803 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T00:51:45,276 DEBUG [RS:0;fea72ea5c4b6:42635 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/WALs/fea72ea5c4b6,42635,1733359903638 2024-12-05T00:51:45,276 DEBUG [RS:1;fea72ea5c4b6:39803 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/WALs/fea72ea5c4b6,39803,1733359903735 2024-12-05T00:51:45,277 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [fea72ea5c4b6,42635,1733359903638] 2024-12-05T00:51:45,277 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [fea72ea5c4b6,41065,1733359903805] 2024-12-05T00:51:45,277 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [fea72ea5c4b6,39803,1733359903735] 2024-12-05T00:51:45,302 INFO [RS:0;fea72ea5c4b6:42635 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T00:51:45,302 INFO [RS:2;fea72ea5c4b6:41065 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T00:51:45,302 INFO [RS:1;fea72ea5c4b6:39803 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T00:51:45,316 INFO [RS:1;fea72ea5c4b6:39803 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T00:51:45,316 INFO [RS:2;fea72ea5c4b6:41065 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T00:51:45,316 INFO [RS:0;fea72ea5c4b6:42635 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T00:51:45,321 INFO [RS:0;fea72ea5c4b6:42635 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T00:51:45,321 INFO [RS:2;fea72ea5c4b6:41065 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 
MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T00:51:45,321 INFO [RS:1;fea72ea5c4b6:39803 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T00:51:45,321 INFO [RS:0;fea72ea5c4b6:42635 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:45,321 INFO [RS:2;fea72ea5c4b6:41065 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:45,321 INFO [RS:1;fea72ea5c4b6:39803 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:45,322 INFO [RS:0;fea72ea5c4b6:42635 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T00:51:45,322 INFO [RS:1;fea72ea5c4b6:39803 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T00:51:45,322 INFO [RS:2;fea72ea5c4b6:41065 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T00:51:45,328 INFO [RS:1;fea72ea5c4b6:39803 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T00:51:45,328 INFO [RS:0;fea72ea5c4b6:42635 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T00:51:45,328 INFO [RS:2;fea72ea5c4b6:41065 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T00:51:45,329 INFO [RS:1;fea72ea5c4b6:39803 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:45,329 INFO [RS:0;fea72ea5c4b6:42635 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:45,329 INFO [RS:2;fea72ea5c4b6:41065 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
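[Editorial note] The wal.WALFactory lines at 00:51:45,275-276 show each region server instantiating AsyncFSWALProvider. As an illustrative sketch (not this test's setup code), the provider is normally selected through configuration; "asyncfs" names the async provider seen above, while "filesystem" would select the classic FSHLog-based one.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalProviderSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Selects the WAL implementation region servers will instantiate.
    conf.set("hbase.wal.provider", "asyncfs");
    System.out.println(conf.get("hbase.wal.provider"));
  }
}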
2024-12-05T00:51:45,330 DEBUG [RS:1;fea72ea5c4b6:39803 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,330 DEBUG [RS:2;fea72ea5c4b6:41065 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,330 DEBUG [RS:0;fea72ea5c4b6:42635 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,330 DEBUG [RS:1;fea72ea5c4b6:39803 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,330 DEBUG [RS:2;fea72ea5c4b6:41065 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,330 DEBUG [RS:0;fea72ea5c4b6:42635 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,330 DEBUG [RS:1;fea72ea5c4b6:39803 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,330 DEBUG [RS:1;fea72ea5c4b6:39803 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,330 DEBUG [RS:0;fea72ea5c4b6:42635 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,330 DEBUG [RS:2;fea72ea5c4b6:41065 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,330 DEBUG [RS:1;fea72ea5c4b6:39803 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,330 DEBUG [RS:0;fea72ea5c4b6:42635 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,330 DEBUG [RS:2;fea72ea5c4b6:41065 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,330 DEBUG [RS:1;fea72ea5c4b6:39803 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/fea72ea5c4b6:0, corePoolSize=2, maxPoolSize=2 2024-12-05T00:51:45,330 DEBUG [RS:2;fea72ea5c4b6:41065 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,330 DEBUG [RS:0;fea72ea5c4b6:42635 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,330 DEBUG [RS:1;fea72ea5c4b6:39803 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,330 DEBUG [RS:1;fea72ea5c4b6:39803 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 
2024-12-05T00:51:45,330 DEBUG [RS:2;fea72ea5c4b6:41065 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/fea72ea5c4b6:0, corePoolSize=2, maxPoolSize=2 2024-12-05T00:51:45,330 DEBUG [RS:0;fea72ea5c4b6:42635 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/fea72ea5c4b6:0, corePoolSize=2, maxPoolSize=2 2024-12-05T00:51:45,330 DEBUG [RS:1;fea72ea5c4b6:39803 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,331 DEBUG [RS:2;fea72ea5c4b6:41065 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,331 DEBUG [RS:0;fea72ea5c4b6:42635 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,331 DEBUG [RS:1;fea72ea5c4b6:39803 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,331 DEBUG [RS:1;fea72ea5c4b6:39803 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,331 DEBUG [RS:0;fea72ea5c4b6:42635 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,331 DEBUG [RS:2;fea72ea5c4b6:41065 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,331 DEBUG [RS:1;fea72ea5c4b6:39803 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,331 DEBUG [RS:2;fea72ea5c4b6:41065 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,331 DEBUG [RS:0;fea72ea5c4b6:42635 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,331 DEBUG [RS:1;fea72ea5c4b6:39803 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/fea72ea5c4b6:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:51:45,331 DEBUG [RS:1;fea72ea5c4b6:39803 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:51:45,331 DEBUG [RS:0;fea72ea5c4b6:42635 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,331 DEBUG [RS:2;fea72ea5c4b6:41065 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,331 DEBUG [RS:0;fea72ea5c4b6:42635 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,331 DEBUG [RS:2;fea72ea5c4b6:41065 {}] executor.ExecutorService(95): Starting 
executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,331 DEBUG [RS:2;fea72ea5c4b6:41065 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,331 DEBUG [RS:0;fea72ea5c4b6:42635 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:45,331 DEBUG [RS:0;fea72ea5c4b6:42635 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/fea72ea5c4b6:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:51:45,331 DEBUG [RS:2;fea72ea5c4b6:41065 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/fea72ea5c4b6:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:51:45,331 DEBUG [RS:0;fea72ea5c4b6:42635 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:51:45,331 DEBUG [RS:2;fea72ea5c4b6:41065 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:51:45,333 INFO [RS:0;fea72ea5c4b6:42635 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:45,333 INFO [RS:0;fea72ea5c4b6:42635 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:45,333 INFO [RS:0;fea72ea5c4b6:42635 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:45,333 INFO [RS:0;fea72ea5c4b6:42635 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:45,333 INFO [RS:0;fea72ea5c4b6:42635 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:45,333 INFO [RS:0;fea72ea5c4b6:42635 {}] hbase.ChoreService(168): Chore ScheduledChore name=fea72ea5c4b6,42635,1733359903638-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T00:51:45,335 INFO [RS:1;fea72ea5c4b6:39803 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:45,335 INFO [RS:2;fea72ea5c4b6:41065 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:45,335 INFO [RS:1;fea72ea5c4b6:39803 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:45,335 INFO [RS:1;fea72ea5c4b6:39803 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:45,335 INFO [RS:2;fea72ea5c4b6:41065 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:45,335 INFO [RS:1;fea72ea5c4b6:39803 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-05T00:51:45,335 INFO [RS:1;fea72ea5c4b6:39803 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:45,335 INFO [RS:2;fea72ea5c4b6:41065 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:45,336 INFO [RS:1;fea72ea5c4b6:39803 {}] hbase.ChoreService(168): Chore ScheduledChore name=fea72ea5c4b6,39803,1733359903735-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T00:51:45,336 INFO [RS:2;fea72ea5c4b6:41065 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:45,336 INFO [RS:2;fea72ea5c4b6:41065 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:45,336 INFO [RS:2;fea72ea5c4b6:41065 {}] hbase.ChoreService(168): Chore ScheduledChore name=fea72ea5c4b6,41065,1733359903805-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T00:51:45,351 INFO [RS:0;fea72ea5c4b6:42635 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T00:51:45,352 INFO [RS:0;fea72ea5c4b6:42635 {}] hbase.ChoreService(168): Chore ScheduledChore name=fea72ea5c4b6,42635,1733359903638-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:45,353 INFO [RS:0;fea72ea5c4b6:42635 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:45,353 INFO [RS:0;fea72ea5c4b6:42635 {}] regionserver.Replication(171): fea72ea5c4b6,42635,1733359903638 started 2024-12-05T00:51:45,357 INFO [RS:2;fea72ea5c4b6:41065 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T00:51:45,357 INFO [RS:1;fea72ea5c4b6:39803 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T00:51:45,358 INFO [RS:2;fea72ea5c4b6:41065 {}] hbase.ChoreService(168): Chore ScheduledChore name=fea72ea5c4b6,41065,1733359903805-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:45,358 INFO [RS:1;fea72ea5c4b6:39803 {}] hbase.ChoreService(168): Chore ScheduledChore name=fea72ea5c4b6,39803,1733359903735-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:45,358 INFO [RS:1;fea72ea5c4b6:39803 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:45,358 INFO [RS:1;fea72ea5c4b6:39803 {}] regionserver.Replication(171): fea72ea5c4b6,39803,1733359903735 started 2024-12-05T00:51:45,358 INFO [RS:2;fea72ea5c4b6:41065 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:45,358 INFO [RS:2;fea72ea5c4b6:41065 {}] regionserver.Replication(171): fea72ea5c4b6,41065,1733359903805 started 2024-12-05T00:51:45,371 INFO [RS:0;fea72ea5c4b6:42635 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
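[Editorial note] The many "Chore ScheduledChore name=... is enabled" lines above come from HBase's ChoreService scheduling periodic background tasks (CompactionChecker, MemstoreFlusherChore, the cleaners, and so on). A minimal sketch of that mechanism with the public ScheduledChore/ChoreService classes follows; the chore name, period, and Stoppable here are invented for illustration.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Trivial Stoppable so the chore has a shutdown flag to poll.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("demo");
    // Runs chore() roughly every 1000 ms, like the CompactionChecker above.
    ScheduledChore tick = new ScheduledChore("demoChore", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("chore tick");
      }
    };
    service.scheduleChore(tick);
    Thread.sleep(3000);
    stopper.stop("demo done");
    service.shutdown();
  }
}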
2024-12-05T00:51:45,371 INFO [RS:0;fea72ea5c4b6:42635 {}] regionserver.HRegionServer(1482): Serving as fea72ea5c4b6,42635,1733359903638, RpcServer on fea72ea5c4b6/172.17.0.2:42635, sessionid=0x101a2f918a20001 2024-12-05T00:51:45,372 DEBUG [RS:0;fea72ea5c4b6:42635 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T00:51:45,372 DEBUG [RS:0;fea72ea5c4b6:42635 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager fea72ea5c4b6,42635,1733359903638 2024-12-05T00:51:45,372 DEBUG [RS:0;fea72ea5c4b6:42635 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fea72ea5c4b6,42635,1733359903638' 2024-12-05T00:51:45,372 DEBUG [RS:0;fea72ea5c4b6:42635 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T00:51:45,373 DEBUG [RS:0;fea72ea5c4b6:42635 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T00:51:45,374 DEBUG [RS:0;fea72ea5c4b6:42635 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T00:51:45,374 DEBUG [RS:0;fea72ea5c4b6:42635 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T00:51:45,374 DEBUG [RS:0;fea72ea5c4b6:42635 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager fea72ea5c4b6,42635,1733359903638 2024-12-05T00:51:45,374 DEBUG [RS:0;fea72ea5c4b6:42635 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fea72ea5c4b6,42635,1733359903638' 2024-12-05T00:51:45,374 DEBUG [RS:0;fea72ea5c4b6:42635 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T00:51:45,375 DEBUG [RS:0;fea72ea5c4b6:42635 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T00:51:45,375 DEBUG [RS:0;fea72ea5c4b6:42635 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T00:51:45,376 INFO [RS:0;fea72ea5c4b6:42635 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T00:51:45,376 INFO [RS:0;fea72ea5c4b6:42635 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T00:51:45,379 INFO [RS:1;fea72ea5c4b6:39803 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:45,379 INFO [RS:2;fea72ea5c4b6:41065 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-05T00:51:45,380 INFO [RS:1;fea72ea5c4b6:39803 {}] regionserver.HRegionServer(1482): Serving as fea72ea5c4b6,39803,1733359903735, RpcServer on fea72ea5c4b6/172.17.0.2:39803, sessionid=0x101a2f918a20002 2024-12-05T00:51:45,380 INFO [RS:2;fea72ea5c4b6:41065 {}] regionserver.HRegionServer(1482): Serving as fea72ea5c4b6,41065,1733359903805, RpcServer on fea72ea5c4b6/172.17.0.2:41065, sessionid=0x101a2f918a20003 2024-12-05T00:51:45,380 DEBUG [RS:1;fea72ea5c4b6:39803 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T00:51:45,380 DEBUG [RS:2;fea72ea5c4b6:41065 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T00:51:45,380 DEBUG [RS:1;fea72ea5c4b6:39803 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager fea72ea5c4b6,39803,1733359903735 2024-12-05T00:51:45,380 DEBUG [RS:2;fea72ea5c4b6:41065 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager fea72ea5c4b6,41065,1733359903805 2024-12-05T00:51:45,380 DEBUG [RS:1;fea72ea5c4b6:39803 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fea72ea5c4b6,39803,1733359903735' 2024-12-05T00:51:45,380 DEBUG [RS:2;fea72ea5c4b6:41065 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fea72ea5c4b6,41065,1733359903805' 2024-12-05T00:51:45,380 DEBUG [RS:1;fea72ea5c4b6:39803 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T00:51:45,380 DEBUG [RS:2;fea72ea5c4b6:41065 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T00:51:45,381 DEBUG [RS:2;fea72ea5c4b6:41065 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T00:51:45,381 DEBUG [RS:1;fea72ea5c4b6:39803 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T00:51:45,382 DEBUG [RS:1;fea72ea5c4b6:39803 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T00:51:45,382 DEBUG [RS:2;fea72ea5c4b6:41065 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T00:51:45,382 DEBUG [RS:2;fea72ea5c4b6:41065 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T00:51:45,382 DEBUG [RS:1;fea72ea5c4b6:39803 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T00:51:45,382 DEBUG [RS:2;fea72ea5c4b6:41065 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager fea72ea5c4b6,41065,1733359903805 2024-12-05T00:51:45,382 DEBUG [RS:1;fea72ea5c4b6:39803 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager fea72ea5c4b6,39803,1733359903735 2024-12-05T00:51:45,382 DEBUG [RS:2;fea72ea5c4b6:41065 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fea72ea5c4b6,41065,1733359903805' 2024-12-05T00:51:45,382 DEBUG [RS:1;fea72ea5c4b6:39803 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fea72ea5c4b6,39803,1733359903735' 2024-12-05T00:51:45,382 DEBUG [RS:2;fea72ea5c4b6:41065 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T00:51:45,382 DEBUG [RS:1;fea72ea5c4b6:39803 {}] procedure.ZKProcedureMemberRpcs(134): Checking for 
aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T00:51:45,382 DEBUG [RS:2;fea72ea5c4b6:41065 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T00:51:45,382 DEBUG [RS:1;fea72ea5c4b6:39803 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T00:51:45,383 DEBUG [RS:2;fea72ea5c4b6:41065 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T00:51:45,383 DEBUG [RS:1;fea72ea5c4b6:39803 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T00:51:45,383 INFO [RS:2;fea72ea5c4b6:41065 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T00:51:45,383 INFO [RS:1;fea72ea5c4b6:39803 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T00:51:45,383 INFO [RS:2;fea72ea5c4b6:41065 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T00:51:45,383 INFO [RS:1;fea72ea5c4b6:39803 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T00:51:45,480 INFO [RS:0;fea72ea5c4b6:42635 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T00:51:45,483 INFO [RS:0;fea72ea5c4b6:42635 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fea72ea5c4b6%2C42635%2C1733359903638, suffix=, logDir=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/WALs/fea72ea5c4b6,42635,1733359903638, archiveDir=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/oldWALs, maxLogs=32 2024-12-05T00:51:45,484 INFO [RS:2;fea72ea5c4b6:41065 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T00:51:45,484 INFO [RS:1;fea72ea5c4b6:39803 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T00:51:45,486 INFO [RS:1;fea72ea5c4b6:39803 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fea72ea5c4b6%2C39803%2C1733359903735, suffix=, logDir=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/WALs/fea72ea5c4b6,39803,1733359903735, archiveDir=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/oldWALs, maxLogs=32 2024-12-05T00:51:45,486 INFO [RS:2;fea72ea5c4b6:41065 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fea72ea5c4b6%2C41065%2C1733359903805, suffix=, logDir=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/WALs/fea72ea5c4b6,41065,1733359903805, archiveDir=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/oldWALs, maxLogs=32 2024-12-05T00:51:45,497 DEBUG [RS:0;fea72ea5c4b6:42635 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/WALs/fea72ea5c4b6,42635,1733359903638/fea72ea5c4b6%2C42635%2C1733359903638.1733359905485, exclude list is [], retry=0 2024-12-05T00:51:45,500 DEBUG [RS:1;fea72ea5c4b6:39803 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/WALs/fea72ea5c4b6,39803,1733359903735/fea72ea5c4b6%2C39803%2C1733359903735.1733359905488, exclude 
list is [], retry=0 2024-12-05T00:51:45,501 DEBUG [RS:2;fea72ea5c4b6:41065 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/WALs/fea72ea5c4b6,41065,1733359903805/fea72ea5c4b6%2C41065%2C1733359903805.1733359905488, exclude list is [], retry=0 2024-12-05T00:51:45,502 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35037,DS-0e58e239-eb5c-4d6c-9391-0d1b8f75b3ee,DISK] 2024-12-05T00:51:45,503 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39373,DS-4ba6f0fd-eeb2-40f6-8e3a-68442b0afe6c,DISK] 2024-12-05T00:51:45,503 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35255,DS-3f2f41a6-0d6a-4780-8204-5cfed35c52ab,DISK] 2024-12-05T00:51:45,505 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35255,DS-3f2f41a6-0d6a-4780-8204-5cfed35c52ab,DISK] 2024-12-05T00:51:45,505 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39373,DS-4ba6f0fd-eeb2-40f6-8e3a-68442b0afe6c,DISK] 2024-12-05T00:51:45,533 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35255,DS-3f2f41a6-0d6a-4780-8204-5cfed35c52ab,DISK] 2024-12-05T00:51:45,533 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35037,DS-0e58e239-eb5c-4d6c-9391-0d1b8f75b3ee,DISK] 2024-12-05T00:51:45,533 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39373,DS-4ba6f0fd-eeb2-40f6-8e3a-68442b0afe6c,DISK] 2024-12-05T00:51:45,535 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35037,DS-0e58e239-eb5c-4d6c-9391-0d1b8f75b3ee,DISK] 2024-12-05T00:51:45,539 INFO [RS:1;fea72ea5c4b6:39803 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/WALs/fea72ea5c4b6,39803,1733359903735/fea72ea5c4b6%2C39803%2C1733359903735.1733359905488 2024-12-05T00:51:45,540 DEBUG 
[RS:1;fea72ea5c4b6:39803 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38625:38625),(127.0.0.1/127.0.0.1:40901:40901),(127.0.0.1/127.0.0.1:39857:39857)] 2024-12-05T00:51:45,541 INFO [RS:0;fea72ea5c4b6:42635 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/WALs/fea72ea5c4b6,42635,1733359903638/fea72ea5c4b6%2C42635%2C1733359903638.1733359905485 2024-12-05T00:51:45,541 DEBUG [RS:0;fea72ea5c4b6:42635 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38625:38625),(127.0.0.1/127.0.0.1:39857:39857),(127.0.0.1/127.0.0.1:40901:40901)] 2024-12-05T00:51:45,543 INFO [RS:2;fea72ea5c4b6:41065 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/WALs/fea72ea5c4b6,41065,1733359903805/fea72ea5c4b6%2C41065%2C1733359903805.1733359905488 2024-12-05T00:51:45,544 DEBUG [RS:2;fea72ea5c4b6:41065 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38625:38625),(127.0.0.1/127.0.0.1:39857:39857),(127.0.0.1/127.0.0.1:40901:40901)] 2024-12-05T00:51:45,759 DEBUG [fea72ea5c4b6:39757 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-05T00:51:45,767 DEBUG [fea72ea5c4b6:39757 {}] balancer.BalancerClusterState(204): Hosts are {fea72ea5c4b6=0} racks are {/default-rack=0} 2024-12-05T00:51:45,772 DEBUG [fea72ea5c4b6:39757 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T00:51:45,772 DEBUG [fea72ea5c4b6:39757 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T00:51:45,772 DEBUG [fea72ea5c4b6:39757 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T00:51:45,772 DEBUG [fea72ea5c4b6:39757 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T00:51:45,772 DEBUG [fea72ea5c4b6:39757 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T00:51:45,772 DEBUG [fea72ea5c4b6:39757 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T00:51:45,772 INFO [fea72ea5c4b6:39757 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T00:51:45,772 INFO [fea72ea5c4b6:39757 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T00:51:45,773 INFO [fea72ea5c4b6:39757 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T00:51:45,773 DEBUG [fea72ea5c4b6:39757 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T00:51:45,778 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=fea72ea5c4b6,39803,1733359903735 2024-12-05T00:51:45,784 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as fea72ea5c4b6,39803,1733359903735, state=OPENING 2024-12-05T00:51:45,830 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-05T00:51:45,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42635-0x101a2f918a20001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:45,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x101a2f918a20003, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:45,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:45,841 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39803-0x101a2f918a20002, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:45,842 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:51:45,842 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:51:45,842 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:51:45,842 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:51:45,843 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T00:51:45,845 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=fea72ea5c4b6,39803,1733359903735}] 2024-12-05T00:51:46,016 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T00:51:46,019 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50143, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T00:51:46,031 INFO [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-05T00:51:46,031 INFO [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T00:51:46,032 INFO [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-05T00:51:46,035 INFO [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fea72ea5c4b6%2C39803%2C1733359903735.meta, suffix=.meta, logDir=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/WALs/fea72ea5c4b6,39803,1733359903735, archiveDir=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/oldWALs, maxLogs=32 2024-12-05T00:51:46,048 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/WALs/fea72ea5c4b6,39803,1733359903735/fea72ea5c4b6%2C39803%2C1733359903735.meta.1733359906037.meta, exclude list is 
[], retry=0 2024-12-05T00:51:46,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35037,DS-0e58e239-eb5c-4d6c-9391-0d1b8f75b3ee,DISK] 2024-12-05T00:51:46,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39373,DS-4ba6f0fd-eeb2-40f6-8e3a-68442b0afe6c,DISK] 2024-12-05T00:51:46,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35255,DS-3f2f41a6-0d6a-4780-8204-5cfed35c52ab,DISK] 2024-12-05T00:51:46,055 INFO [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/WALs/fea72ea5c4b6,39803,1733359903735/fea72ea5c4b6%2C39803%2C1733359903735.meta.1733359906037.meta 2024-12-05T00:51:46,055 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40901:40901),(127.0.0.1/127.0.0.1:39857:39857),(127.0.0.1/127.0.0.1:38625:38625)] 2024-12-05T00:51:46,056 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-05T00:51:46,057 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-05T00:51:46,059 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-05T00:51:46,063 INFO [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-05T00:51:46,067 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-05T00:51:46,068 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:51:46,068 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-05T00:51:46,068 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-05T00:51:46,072 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T00:51:46,074 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T00:51:46,074 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:46,075 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:51:46,075 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T00:51:46,077 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T00:51:46,077 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:46,078 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:51:46,078 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T00:51:46,080 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T00:51:46,080 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:46,081 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:51:46,081 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T00:51:46,082 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T00:51:46,082 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:46,083 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-05T00:51:46,083 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T00:51:46,085 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/hbase/meta/1588230740 2024-12-05T00:51:46,087 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/hbase/meta/1588230740 2024-12-05T00:51:46,089 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T00:51:46,090 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T00:51:46,090 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T00:51:46,093 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T00:51:46,094 INFO [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59174870, jitterRate=-0.11822572350502014}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T00:51:46,095 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-05T00:51:46,096 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733359906068Writing region info on filesystem at 1733359906069 (+1 ms)Initializing all the Stores at 1733359906071 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733359906071Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733359906071Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733359906071Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733359906071Cleaning up temporary data from old regions at 1733359906090 (+19 ms)Running coprocessor post-open hooks at 1733359906095 (+5 ms)Region opened successfully at 1733359906096 (+1 ms) 2024-12-05T00:51:46,102 INFO [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733359906009 2024-12-05T00:51:46,111 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-05T00:51:46,112 INFO [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-05T00:51:46,114 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=fea72ea5c4b6,39803,1733359903735 2024-12-05T00:51:46,116 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as fea72ea5c4b6,39803,1733359903735, state=OPEN 2024-12-05T00:51:46,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T00:51:46,146 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39803-0x101a2f918a20002, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T00:51:46,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x101a2f918a20003, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T00:51:46,146 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:51:46,146 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:51:46,146 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:51:46,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42635-0x101a2f918a20001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T00:51:46,147 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:51:46,147 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=fea72ea5c4b6,39803,1733359903735 2024-12-05T00:51:46,152 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-05T00:51:46,153 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=fea72ea5c4b6,39803,1733359903735 in 302 msec 2024-12-05T00:51:46,160 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-05T00:51:46,160 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.0610 sec 2024-12-05T00:51:46,161 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:51:46,162 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-05T00:51:46,180 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T00:51:46,181 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fea72ea5c4b6,39803,1733359903735, seqNum=-1] 2024-12-05T00:51:46,218 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T00:51:46,221 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41549, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T00:51:46,243 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.4060 sec 2024-12-05T00:51:46,244 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733359906244, completionTime=-1 2024-12-05T00:51:46,248 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-05T00:51:46,248 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-05T00:51:46,274 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-05T00:51:46,274 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733359966274 2024-12-05T00:51:46,274 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733360026274 2024-12-05T00:51:46,274 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 25 msec 2024-12-05T00:51:46,276 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-05T00:51:46,283 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fea72ea5c4b6,39757,1733359902807-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:46,284 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fea72ea5c4b6,39757,1733359902807-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:46,284 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fea72ea5c4b6,39757,1733359902807-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:46,286 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-fea72ea5c4b6:39757, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:46,286 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:46,287 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:46,293 DEBUG [master/fea72ea5c4b6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-05T00:51:46,313 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.397sec 2024-12-05T00:51:46,314 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-05T00:51:46,315 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-05T00:51:46,316 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-05T00:51:46,317 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-05T00:51:46,317 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-05T00:51:46,317 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fea72ea5c4b6,39757,1733359902807-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T00:51:46,318 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fea72ea5c4b6,39757,1733359902807-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-05T00:51:46,322 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-05T00:51:46,323 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-05T00:51:46,324 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fea72ea5c4b6,39757,1733359902807-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:46,345 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fa41cc3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:51:46,350 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-05T00:51:46,350 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-05T00:51:46,354 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request fea72ea5c4b6,39757,-1 for getting cluster id 2024-12-05T00:51:46,357 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T00:51:46,365 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '95ac2848-fd5d-471e-b44c-d69239aaba32' 2024-12-05T00:51:46,367 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T00:51:46,368 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "95ac2848-fd5d-471e-b44c-d69239aaba32" 2024-12-05T00:51:46,370 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@162f3029, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:51:46,370 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fea72ea5c4b6,39757,-1] 2024-12-05T00:51:46,372 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T00:51:46,373 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:51:46,375 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52926, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
2024-12-05T00:51:46,377 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24ac6fb2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:51:46,378 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T00:51:46,384 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fea72ea5c4b6,39803,1733359903735, seqNum=-1] 2024-12-05T00:51:46,385 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T00:51:46,387 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39662, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T00:51:46,404 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=fea72ea5c4b6,39757,1733359902807 2024-12-05T00:51:46,408 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-05T00:51:46,412 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is fea72ea5c4b6,39757,1733359902807 2024-12-05T00:51:46,415 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@42f1365a 2024-12-05T00:51:46,416 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-05T00:51:46,419 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52932, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-05T00:51:46,423 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39757 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T00:51:46,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39757 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-05T00:51:46,433 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T00:51:46,435 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39757 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-05T00:51:46,436 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:46,438 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T00:51:46,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39757 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T00:51:46,447 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T00:51:46,447 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T00:51:46,449 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-976733774_22 at /127.0.0.1:33292 [Receiving block BP-467069894-172.17.0.2-1733359898308:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:39373:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33292 dst: /127.0.0.1:39373 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:51:46,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_-9223372036854775680_1021 (size=392) 2024-12-05T00:51:46,454 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-05T00:51:46,457 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => a5236c3302913465017cafe56777b1ed, NAME => 'TestHBaseWalOnEC,,1733359906420.a5236c3302913465017cafe56777b1ed.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693 2024-12-05T00:51:46,463 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T00:51:46,463 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T00:51:46,466 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-976733774_22 at /127.0.0.1:33302 [Receiving block BP-467069894-172.17.0.2-1733359898308:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:39373:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33302 dst: /127.0.0.1:39373 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:51:46,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-05T00:51:46,472 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
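Note on the two WARN/ERROR bursts above: the writes are going out under the RS-3-2-1024k erasure coding policy, but only three datanodes appear in this log (127.0.0.1:35037, :39373, :35255). RS-3-2 places three data blocks plus two parity blocks, so with three datanodes the parity slots at indices 3 and 4 cannot be allocated, which is exactly what DFSStripedOutputStream reports. The WARN message itself names the check to run; below is a minimal sketch of that verification, assuming an hdfs client pointed at the same minicluster namenode seen in this log (hdfs://localhost:40131) — the commands are standard `hdfs ec` subcommands, not anything specific to this test.

  # List the erasure coding policies the cluster knows about and which are enabled
  hdfs ec -listPolicies

  # Verify there are enough datanodes/racks for every enabled policy
  # (this is the command the WARN message recommends)
  hdfs ec -verifyClusterSetup

  # Show the policy applied to the test data directory used in this run
  hdfs ec -getPolicy -path /user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693

On a 3-datanode minicluster this check is expected to flag RS-3-2-1024k as unsatisfiable, so the allocation warnings during table creation are a consequence of the test topology rather than data loss in practice.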
2024-12-05T00:51:46,472 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733359906420.a5236c3302913465017cafe56777b1ed.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:51:46,472 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing a5236c3302913465017cafe56777b1ed, disabling compactions & flushes 2024-12-05T00:51:46,472 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733359906420.a5236c3302913465017cafe56777b1ed. 2024-12-05T00:51:46,472 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733359906420.a5236c3302913465017cafe56777b1ed. 2024-12-05T00:51:46,472 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733359906420.a5236c3302913465017cafe56777b1ed. after waiting 0 ms 2024-12-05T00:51:46,472 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733359906420.a5236c3302913465017cafe56777b1ed. 2024-12-05T00:51:46,473 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733359906420.a5236c3302913465017cafe56777b1ed. 2024-12-05T00:51:46,473 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for a5236c3302913465017cafe56777b1ed: Waiting for close lock at 1733359906472Disabling compacts and flushes for region at 1733359906472Disabling writes for close at 1733359906472Writing region close event to WAL at 1733359906473 (+1 ms)Closed at 1733359906473 2024-12-05T00:51:46,475 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T00:51:46,480 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733359906420.a5236c3302913465017cafe56777b1ed.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733359906475"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733359906475"}]},"ts":"1733359906475"} 2024-12-05T00:51:46,486 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-05T00:51:46,489 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T00:51:46,492 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733359906489"}]},"ts":"1733359906489"} 2024-12-05T00:51:46,501 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-05T00:51:46,502 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {fea72ea5c4b6=0} racks are {/default-rack=0} 2024-12-05T00:51:46,503 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T00:51:46,503 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T00:51:46,503 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T00:51:46,503 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T00:51:46,503 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T00:51:46,503 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T00:51:46,503 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T00:51:46,503 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T00:51:46,503 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T00:51:46,503 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T00:51:46,505 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=a5236c3302913465017cafe56777b1ed, ASSIGN}] 2024-12-05T00:51:46,508 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=a5236c3302913465017cafe56777b1ed, ASSIGN 2024-12-05T00:51:46,511 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=a5236c3302913465017cafe56777b1ed, ASSIGN; state=OFFLINE, location=fea72ea5c4b6,39803,1733359903735; forceNewPlan=false, retain=false 2024-12-05T00:51:46,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39757 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T00:51:46,663 INFO [fea72ea5c4b6:39757 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-05T00:51:46,664 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a5236c3302913465017cafe56777b1ed, regionState=OPENING, regionLocation=fea72ea5c4b6,39803,1733359903735 2024-12-05T00:51:46,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=a5236c3302913465017cafe56777b1ed, ASSIGN because future has completed 2024-12-05T00:51:46,669 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a5236c3302913465017cafe56777b1ed, server=fea72ea5c4b6,39803,1733359903735}] 2024-12-05T00:51:46,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39757 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T00:51:46,829 INFO [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733359906420.a5236c3302913465017cafe56777b1ed. 2024-12-05T00:51:46,829 DEBUG [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => a5236c3302913465017cafe56777b1ed, NAME => 'TestHBaseWalOnEC,,1733359906420.a5236c3302913465017cafe56777b1ed.', STARTKEY => '', ENDKEY => ''} 2024-12-05T00:51:46,830 DEBUG [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC a5236c3302913465017cafe56777b1ed 2024-12-05T00:51:46,830 DEBUG [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733359906420.a5236c3302913465017cafe56777b1ed.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:51:46,830 DEBUG [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for a5236c3302913465017cafe56777b1ed 2024-12-05T00:51:46,830 DEBUG [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for a5236c3302913465017cafe56777b1ed 2024-12-05T00:51:46,834 INFO [StoreOpener-a5236c3302913465017cafe56777b1ed-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a5236c3302913465017cafe56777b1ed 2024-12-05T00:51:46,837 INFO [StoreOpener-a5236c3302913465017cafe56777b1ed-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a5236c3302913465017cafe56777b1ed columnFamilyName cf 2024-12-05T00:51:46,837 DEBUG [StoreOpener-a5236c3302913465017cafe56777b1ed-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:46,838 INFO [StoreOpener-a5236c3302913465017cafe56777b1ed-1 {}] regionserver.HStore(327): Store=a5236c3302913465017cafe56777b1ed/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:51:46,838 DEBUG [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for a5236c3302913465017cafe56777b1ed 2024-12-05T00:51:46,840 DEBUG [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/default/TestHBaseWalOnEC/a5236c3302913465017cafe56777b1ed 2024-12-05T00:51:46,841 DEBUG [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/default/TestHBaseWalOnEC/a5236c3302913465017cafe56777b1ed 2024-12-05T00:51:46,842 DEBUG [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for a5236c3302913465017cafe56777b1ed 2024-12-05T00:51:46,842 DEBUG [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for a5236c3302913465017cafe56777b1ed 2024-12-05T00:51:46,845 DEBUG [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for a5236c3302913465017cafe56777b1ed 2024-12-05T00:51:46,850 DEBUG [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/default/TestHBaseWalOnEC/a5236c3302913465017cafe56777b1ed/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T00:51:46,851 INFO [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened a5236c3302913465017cafe56777b1ed; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63250180, jitterRate=-0.05749887228012085}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T00:51:46,852 DEBUG [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a5236c3302913465017cafe56777b1ed 2024-12-05T00:51:46,853 DEBUG [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for a5236c3302913465017cafe56777b1ed: Running coprocessor pre-open hook at 1733359906830Writing region info on filesystem at 1733359906830Initializing all the Stores at 1733359906832 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733359906832Cleaning up temporary data from old regions at 1733359906842 (+10 ms)Running coprocessor post-open hooks at 1733359906852 (+10 ms)Region opened successfully at 1733359906852 2024-12-05T00:51:46,856 INFO [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733359906420.a5236c3302913465017cafe56777b1ed., pid=6, masterSystemTime=1733359906823 2024-12-05T00:51:46,861 DEBUG [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733359906420.a5236c3302913465017cafe56777b1ed. 2024-12-05T00:51:46,861 INFO [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733359906420.a5236c3302913465017cafe56777b1ed. 2024-12-05T00:51:46,864 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a5236c3302913465017cafe56777b1ed, regionState=OPEN, openSeqNum=2, regionLocation=fea72ea5c4b6,39803,1733359903735 2024-12-05T00:51:46,870 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a5236c3302913465017cafe56777b1ed, server=fea72ea5c4b6,39803,1733359903735 because future has completed 2024-12-05T00:51:46,878 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-05T00:51:46,878 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure a5236c3302913465017cafe56777b1ed, server=fea72ea5c4b6,39803,1733359903735 in 204 msec 2024-12-05T00:51:46,884 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-05T00:51:46,884 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=a5236c3302913465017cafe56777b1ed, ASSIGN in 373 msec 2024-12-05T00:51:46,888 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T00:51:46,889 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733359906889"}]},"ts":"1733359906889"} 2024-12-05T00:51:46,892 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-05T00:51:46,894 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T00:51:46,898 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 468 msec 2024-12-05T00:51:47,072 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39757 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T00:51:47,073 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-05T00:51:47,073 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-05T00:51:47,074 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T00:51:47,079 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-05T00:51:47,080 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T00:51:47,081 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-05T00:51:47,088 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733359906420.a5236c3302913465017cafe56777b1ed., hostname=fea72ea5c4b6,39803,1733359903735, seqNum=2] 2024-12-05T00:51:47,097 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39757 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-05T00:51:47,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39757 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-05T00:51:47,103 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-05T00:51:47,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39757 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T00:51:47,105 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-05T00:51:47,106 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-05T00:51:47,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39757 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T00:51:47,267 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39803 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-05T00:51:47,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733359906420.a5236c3302913465017cafe56777b1ed. 
2024-12-05T00:51:47,273 INFO [RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing a5236c3302913465017cafe56777b1ed 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-05T00:51:47,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/default/TestHBaseWalOnEC/a5236c3302913465017cafe56777b1ed/.tmp/cf/6622233c2ab9435eba03248e14cf28ea is 36, key is row/cf:cq/1733359907090/Put/seqid=0 2024-12-05T00:51:47,344 WARN [RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T00:51:47,344 WARN [RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T00:51:47,355 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_278138112_22 at /127.0.0.1:52120 [Receiving block BP-467069894-172.17.0.2-1733359898308:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:35255:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52120 dst: /127.0.0.1:35255 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-05T00:51:47,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35255 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-05T00:51:47,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39757 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T00:51:47,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35037 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-05T00:51:47,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35255 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-05T00:51:47,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35037 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-05T00:51:47,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35037 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-05T00:51:47,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35255 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-05T00:51:47,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35255 is added to blk_-9223372036854775741_1008 (size=1189) 2024-12-05T00:51:47,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_-9223372036854775725_1010 (size=34) 2024-12-05T00:51:47,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35255 is added to blk_-9223372036854775724_1010 (size=34) 2024-12-05T00:51:47,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35255 is added to blk_-9223372036854775692_1015 (size=32) 2024-12-05T00:51:47,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_-9223372036854775693_1015 (size=32) 2024-12-05T00:51:47,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39757 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T00:51:47,762 WARN [RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-05T00:51:47,762 INFO [RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/default/TestHBaseWalOnEC/a5236c3302913465017cafe56777b1ed/.tmp/cf/6622233c2ab9435eba03248e14cf28ea 2024-12-05T00:51:47,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/default/TestHBaseWalOnEC/a5236c3302913465017cafe56777b1ed/.tmp/cf/6622233c2ab9435eba03248e14cf28ea as hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/default/TestHBaseWalOnEC/a5236c3302913465017cafe56777b1ed/cf/6622233c2ab9435eba03248e14cf28ea 2024-12-05T00:51:47,818 INFO [RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/default/TestHBaseWalOnEC/a5236c3302913465017cafe56777b1ed/cf/6622233c2ab9435eba03248e14cf28ea, entries=1, sequenceid=5, filesize=4.7 K 2024-12-05T00:51:47,824 INFO [RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for a5236c3302913465017cafe56777b1ed in 550ms, sequenceid=5, compaction requested=false 2024-12-05T00:51:47,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-05T00:51:47,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for a5236c3302913465017cafe56777b1ed: 2024-12-05T00:51:47,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733359906420.a5236c3302913465017cafe56777b1ed. 
2024-12-05T00:51:47,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-05T00:51:47,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39757 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-05T00:51:47,836 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-05T00:51:47,836 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 726 msec 2024-12-05T00:51:47,841 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 739 msec 2024-12-05T00:51:48,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39757 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T00:51:48,242 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-05T00:51:48,257 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-05T00:51:48,258 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-05T00:51:48,258 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:51:48,263 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:51:48,264 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:51:48,264 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T00:51:48,264 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-05T00:51:48,264 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=404023594, stopped=false 2024-12-05T00:51:48,265 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=fea72ea5c4b6,39757,1733359902807 2024-12-05T00:51:48,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x101a2f918a20003, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T00:51:48,277 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39803-0x101a2f918a20002, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T00:51:48,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42635-0x101a2f918a20001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T00:51:48,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T00:51:48,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x101a2f918a20003, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:48,277 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T00:51:48,277 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42635-0x101a2f918a20001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:48,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:48,277 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39803-0x101a2f918a20002, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:48,278 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42635-0x101a2f918a20001, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:51:48,278 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41065-0x101a2f918a20003, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:51:48,278 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:51:48,278 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39803-0x101a2f918a20002, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:51:48,278 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-05T00:51:48,278 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:51:48,278 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:51:48,279 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'fea72ea5c4b6,42635,1733359903638' ***** 2024-12-05T00:51:48,279 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T00:51:48,279 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'fea72ea5c4b6,39803,1733359903735' ***** 2024-12-05T00:51:48,279 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T00:51:48,279 INFO [RS:1;fea72ea5c4b6:39803 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T00:51:48,279 INFO [RS:0;fea72ea5c4b6:42635 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T00:51:48,279 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T00:51:48,279 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'fea72ea5c4b6,41065,1733359903805' ***** 2024-12-05T00:51:48,279 INFO [RS:0;fea72ea5c4b6:42635 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-12-05T00:51:48,279 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T00:51:48,279 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T00:51:48,279 INFO [RS:0;fea72ea5c4b6:42635 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T00:51:48,279 INFO [RS:1;fea72ea5c4b6:39803 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T00:51:48,280 INFO [RS:2;fea72ea5c4b6:41065 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T00:51:48,280 INFO [RS:1;fea72ea5c4b6:39803 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T00:51:48,280 INFO [RS:0;fea72ea5c4b6:42635 {}] regionserver.HRegionServer(959): stopping server fea72ea5c4b6,42635,1733359903638 2024-12-05T00:51:48,280 INFO [RS:2;fea72ea5c4b6:41065 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T00:51:48,280 INFO [RS:0;fea72ea5c4b6:42635 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T00:51:48,280 INFO [RS:2;fea72ea5c4b6:41065 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T00:51:48,280 INFO [RS:1;fea72ea5c4b6:39803 {}] regionserver.HRegionServer(3091): Received CLOSE for a5236c3302913465017cafe56777b1ed 2024-12-05T00:51:48,280 INFO [RS:2;fea72ea5c4b6:41065 {}] regionserver.HRegionServer(959): stopping server fea72ea5c4b6,41065,1733359903805 2024-12-05T00:51:48,280 INFO [RS:0;fea72ea5c4b6:42635 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;fea72ea5c4b6:42635. 
2024-12-05T00:51:48,280 INFO [RS:2;fea72ea5c4b6:41065 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T00:51:48,280 DEBUG [RS:0;fea72ea5c4b6:42635 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:51:48,280 DEBUG [RS:0;fea72ea5c4b6:42635 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:51:48,280 INFO [RS:2;fea72ea5c4b6:41065 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;fea72ea5c4b6:41065. 
2024-12-05T00:51:48,280 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T00:51:48,280 DEBUG [RS:2;fea72ea5c4b6:41065 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:51:48,280 DEBUG [RS:2;fea72ea5c4b6:41065 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:51:48,280 INFO [RS:1;fea72ea5c4b6:39803 {}] regionserver.HRegionServer(959): stopping server fea72ea5c4b6,39803,1733359903735 2024-12-05T00:51:48,280 INFO [RS:0;fea72ea5c4b6:42635 {}] regionserver.HRegionServer(976): stopping server fea72ea5c4b6,42635,1733359903638; all regions closed. 2024-12-05T00:51:48,280 INFO [RS:1;fea72ea5c4b6:39803 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T00:51:48,280 INFO [RS:2;fea72ea5c4b6:41065 {}] regionserver.HRegionServer(976): stopping server fea72ea5c4b6,41065,1733359903805; all regions closed. 2024-12-05T00:51:48,280 INFO [RS:1;fea72ea5c4b6:39803 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;fea72ea5c4b6:39803. 
2024-12-05T00:51:48,280 DEBUG [RS:1;fea72ea5c4b6:39803 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:51:48,280 DEBUG [RS:1;fea72ea5c4b6:39803 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:51:48,281 INFO [RS:1;fea72ea5c4b6:39803 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T00:51:48,281 DEBUG [RS_CLOSE_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a5236c3302913465017cafe56777b1ed, disabling compactions & flushes 2024-12-05T00:51:48,281 INFO [RS:1;fea72ea5c4b6:39803 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T00:51:48,281 INFO [RS:1;fea72ea5c4b6:39803 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-05T00:51:48,281 INFO [RS_CLOSE_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733359906420.a5236c3302913465017cafe56777b1ed. 2024-12-05T00:51:48,281 INFO [RS:1;fea72ea5c4b6:39803 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-05T00:51:48,281 DEBUG [RS_CLOSE_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733359906420.a5236c3302913465017cafe56777b1ed. 2024-12-05T00:51:48,281 DEBUG [RS_CLOSE_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733359906420.a5236c3302913465017cafe56777b1ed. after waiting 0 ms 2024-12-05T00:51:48,281 DEBUG [RS_CLOSE_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733359906420.a5236c3302913465017cafe56777b1ed. 
2024-12-05T00:51:48,282 INFO [RS:1;fea72ea5c4b6:39803 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-05T00:51:48,282 DEBUG [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T00:51:48,282 DEBUG [RS:1;fea72ea5c4b6:39803 {}] regionserver.HRegionServer(1325): Online Regions={a5236c3302913465017cafe56777b1ed=TestHBaseWalOnEC,,1733359906420.a5236c3302913465017cafe56777b1ed., 1588230740=hbase:meta,,1.1588230740} 2024-12-05T00:51:48,282 INFO [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T00:51:48,282 DEBUG [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T00:51:48,282 DEBUG [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T00:51:48,282 DEBUG [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T00:51:48,282 INFO [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-05T00:51:48,282 DEBUG [RS:1;fea72ea5c4b6:39803 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, a5236c3302913465017cafe56777b1ed 2024-12-05T00:51:48,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35037 is added to blk_1073741828_1018 (size=93) 2024-12-05T00:51:48,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35255 is added to blk_1073741826_1016 (size=93) 2024-12-05T00:51:48,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35255 is added to blk_1073741828_1018 (size=93) 2024-12-05T00:51:48,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_1073741826_1016 (size=93) 2024-12-05T00:51:48,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35037 is added to blk_1073741826_1016 (size=93) 2024-12-05T00:51:48,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_1073741828_1018 (size=93) 2024-12-05T00:51:48,297 DEBUG [RS:2;fea72ea5c4b6:41065 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/oldWALs 2024-12-05T00:51:48,297 INFO [RS:2;fea72ea5c4b6:41065 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL fea72ea5c4b6%2C41065%2C1733359903805:(num 1733359905488) 2024-12-05T00:51:48,298 DEBUG [RS:2;fea72ea5c4b6:41065 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:51:48,298 INFO [RS:2;fea72ea5c4b6:41065 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T00:51:48,298 INFO [RS:2;fea72ea5c4b6:41065 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T00:51:48,298 DEBUG [RS:0;fea72ea5c4b6:42635 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/oldWALs 
2024-12-05T00:51:48,298 INFO [RS:0;fea72ea5c4b6:42635 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL fea72ea5c4b6%2C42635%2C1733359903638:(num 1733359905485) 2024-12-05T00:51:48,298 DEBUG [RS:0;fea72ea5c4b6:42635 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:51:48,298 INFO [RS:0;fea72ea5c4b6:42635 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T00:51:48,298 INFO [RS:2;fea72ea5c4b6:41065 {}] hbase.ChoreService(370): Chore service for: regionserver/fea72ea5c4b6:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T00:51:48,298 INFO [RS:0;fea72ea5c4b6:42635 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T00:51:48,298 INFO [RS:2;fea72ea5c4b6:41065 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T00:51:48,298 INFO [regionserver/fea72ea5c4b6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-05T00:51:48,298 INFO [RS:2;fea72ea5c4b6:41065 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T00:51:48,298 INFO [RS:2;fea72ea5c4b6:41065 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-05T00:51:48,298 INFO [RS:0;fea72ea5c4b6:42635 {}] hbase.ChoreService(370): Chore service for: regionserver/fea72ea5c4b6:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T00:51:48,298 INFO [RS:2;fea72ea5c4b6:41065 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T00:51:48,299 INFO [RS:0;fea72ea5c4b6:42635 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T00:51:48,299 INFO [regionserver/fea72ea5c4b6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-05T00:51:48,299 INFO [RS:0;fea72ea5c4b6:42635 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T00:51:48,299 INFO [RS:0;fea72ea5c4b6:42635 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-05T00:51:48,299 INFO [RS:0;fea72ea5c4b6:42635 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T00:51:48,299 INFO [RS:2;fea72ea5c4b6:41065 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41065 2024-12-05T00:51:48,299 INFO [RS:0;fea72ea5c4b6:42635 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42635 2024-12-05T00:51:48,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42635-0x101a2f918a20001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/fea72ea5c4b6,42635,1733359903638 2024-12-05T00:51:48,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T00:51:48,308 INFO [RS:0;fea72ea5c4b6:42635 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T00:51:48,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x101a2f918a20003, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/fea72ea5c4b6,41065,1733359903805 2024-12-05T00:51:48,308 INFO [RS:2;fea72ea5c4b6:41065 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T00:51:48,310 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [fea72ea5c4b6,42635,1733359903638] 2024-12-05T00:51:48,313 DEBUG [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/hbase/meta/1588230740/.tmp/info/6b65d2360be44e2c94318f587468b1b4 is 153, key is TestHBaseWalOnEC,,1733359906420.a5236c3302913465017cafe56777b1ed./info:regioninfo/1733359906864/Put/seqid=0 2024-12-05T00:51:48,317 DEBUG [RS_CLOSE_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/default/TestHBaseWalOnEC/a5236c3302913465017cafe56777b1ed/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-05T00:51:48,317 WARN [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T00:51:48,317 WARN [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T00:51:48,318 INFO [RS_CLOSE_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733359906420.a5236c3302913465017cafe56777b1ed. 
2024-12-05T00:51:48,318 DEBUG [RS_CLOSE_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a5236c3302913465017cafe56777b1ed: Waiting for close lock at 1733359908280Running coprocessor pre-close hooks at 1733359908281 (+1 ms)Disabling compacts and flushes for region at 1733359908281Disabling writes for close at 1733359908281Writing region close event to WAL at 1733359908283 (+2 ms)Running coprocessor post-close hooks at 1733359908318 (+35 ms)Closed at 1733359908318 2024-12-05T00:51:48,319 DEBUG [RS_CLOSE_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733359906420.a5236c3302913465017cafe56777b1ed. 2024-12-05T00:51:48,322 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_278138112_22 at /127.0.0.1:33364 [Receiving block BP-467069894-172.17.0.2-1733359898308:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:39373:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33364 dst: /127.0.0.1:39373 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:51:48,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-05T00:51:48,326 WARN [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
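The repeated "Cannot allocate parity block(index=3, policy=RS-3-2-1024k)" / "(index=4, ...)" warnings and the "Block group <1> failed to write 2 blocks" message above come from striped writes against a mini cluster that runs only 3 DataNodes, while RS-3-2-1024k needs 3 data plus 2 parity blocks (5 block locations) for full placement; the warning itself points at 'hdfs ec -verifyClusterSetup' for the same check. Below is a minimal client-side sketch of that capacity check, not part of this test: the class name, NameNode URI, and printed messages are illustrative, assuming the Hadoop 3.x DistributedFileSystem client API.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Illustrative check: does the cluster have enough DataNodes to fully place an
// RS-3-2-1024k block group (3 data + 2 parity = 5 block locations)?
public class EcPlacementCheck {
  public static void main(String[] args) throws IOException {
    // NameNode URI is an example value; the mini cluster in this log listens on localhost:40131.
    URI nameNode = URI.create(args.length > 0 ? args[0] : "hdfs://localhost:40131");
    int dataUnits = 3;    // "RS-3-2" = 3 data units ...
    int parityUnits = 2;  // ... plus 2 parity units per block group
    try (FileSystem fs = FileSystem.get(nameNode, new Configuration())) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // DataNodes currently known to the NameNode.
      int dataNodes = dfs.getDataNodeStats().length;
      int required = dataUnits + parityUnits;
      if (dataNodes < required) {
        System.out.printf("Only %d DataNodes for RS-%d-%d (needs %d): %d block(s) per group "
            + "cannot be placed, so expect parity-allocation warnings.%n",
            dataNodes, dataUnits, parityUnits, required, required - dataNodes);
      } else {
        System.out.println("Cluster can fully place RS-3-2-1024k block groups.");
      }
    }
  }
}

Run against the 3-DataNode mini cluster used here, such a check would report that 2 of the 5 required block locations cannot be allocated, which matches the "failed to write 2 blocks" warnings that recur through the rest of this shutdown sequence.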
2024-12-05T00:51:48,327 INFO [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/hbase/meta/1588230740/.tmp/info/6b65d2360be44e2c94318f587468b1b4 2024-12-05T00:51:48,329 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/fea72ea5c4b6,42635,1733359903638 already deleted, retry=false 2024-12-05T00:51:48,329 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; fea72ea5c4b6,42635,1733359903638 expired; onlineServers=2 2024-12-05T00:51:48,329 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [fea72ea5c4b6,41065,1733359903805] 2024-12-05T00:51:48,338 INFO [regionserver/fea72ea5c4b6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T00:51:48,338 INFO [regionserver/fea72ea5c4b6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T00:51:48,338 INFO [regionserver/fea72ea5c4b6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T00:51:48,339 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/fea72ea5c4b6,41065,1733359903805 already deleted, retry=false 2024-12-05T00:51:48,340 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; fea72ea5c4b6,41065,1733359903805 expired; onlineServers=1 2024-12-05T00:51:48,340 INFO [regionserver/fea72ea5c4b6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-05T00:51:48,340 INFO [regionserver/fea72ea5c4b6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-05T00:51:48,360 DEBUG [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/hbase/meta/1588230740/.tmp/ns/fa1921f75ef64bbfaa682920e47d4d33 is 43, key is default/ns:d/1733359906224/Put/seqid=0 2024-12-05T00:51:48,362 WARN [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T00:51:48,362 WARN [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T00:51:48,368 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_278138112_22 at /127.0.0.1:53470 [Receiving block BP-467069894-172.17.0.2-1733359898308:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:35037:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53470 dst: /127.0.0.1:35037 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:51:48,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35037 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-05T00:51:48,373 WARN [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T00:51:48,373 INFO [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/hbase/meta/1588230740/.tmp/ns/fa1921f75ef64bbfaa682920e47d4d33 2024-12-05T00:51:48,404 DEBUG [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/hbase/meta/1588230740/.tmp/table/e104493b5a5040708b1f1593717f3dab is 52, key is TestHBaseWalOnEC/table:state/1733359906889/Put/seqid=0 2024-12-05T00:51:48,406 WARN [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T00:51:48,406 WARN [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-05T00:51:48,409 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_278138112_22 at /127.0.0.1:53496 [Receiving block BP-467069894-172.17.0.2-1733359898308:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:35037:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53496 dst: /127.0.0.1:35037 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:51:48,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35037 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-05T00:51:48,414 WARN [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-05T00:51:48,414 INFO [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/hbase/meta/1588230740/.tmp/table/e104493b5a5040708b1f1593717f3dab 2024-12-05T00:51:48,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x101a2f918a20003, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:51:48,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42635-0x101a2f918a20001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:51:48,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x101a2f918a20003, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:51:48,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42635-0x101a2f918a20001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:51:48,420 INFO [RS:0;fea72ea5c4b6:42635 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T00:51:48,420 INFO [RS:2;fea72ea5c4b6:41065 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T00:51:48,420 INFO [RS:0;fea72ea5c4b6:42635 {}] regionserver.HRegionServer(1031): Exiting; stopping=fea72ea5c4b6,42635,1733359903638; zookeeper connection closed. 2024-12-05T00:51:48,420 INFO [RS:2;fea72ea5c4b6:41065 {}] regionserver.HRegionServer(1031): Exiting; stopping=fea72ea5c4b6,41065,1733359903805; zookeeper connection closed. 
2024-12-05T00:51:48,420 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2aa69280 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2aa69280 2024-12-05T00:51:48,420 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2ac5182b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2ac5182b 2024-12-05T00:51:48,426 DEBUG [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/hbase/meta/1588230740/.tmp/info/6b65d2360be44e2c94318f587468b1b4 as hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/hbase/meta/1588230740/info/6b65d2360be44e2c94318f587468b1b4 2024-12-05T00:51:48,435 INFO [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/hbase/meta/1588230740/info/6b65d2360be44e2c94318f587468b1b4, entries=10, sequenceid=11, filesize=6.5 K 2024-12-05T00:51:48,437 DEBUG [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/hbase/meta/1588230740/.tmp/ns/fa1921f75ef64bbfaa682920e47d4d33 as hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/hbase/meta/1588230740/ns/fa1921f75ef64bbfaa682920e47d4d33 2024-12-05T00:51:48,445 INFO [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/hbase/meta/1588230740/ns/fa1921f75ef64bbfaa682920e47d4d33, entries=2, sequenceid=11, filesize=5.0 K 2024-12-05T00:51:48,447 DEBUG [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/hbase/meta/1588230740/.tmp/table/e104493b5a5040708b1f1593717f3dab as hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/hbase/meta/1588230740/table/e104493b5a5040708b1f1593717f3dab 2024-12-05T00:51:48,457 INFO [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/hbase/meta/1588230740/table/e104493b5a5040708b1f1593717f3dab, entries=2, sequenceid=11, filesize=5.1 K 2024-12-05T00:51:48,460 INFO [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 177ms, sequenceid=11, compaction requested=false 2024-12-05T00:51:48,460 DEBUG [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-05T00:51:48,474 DEBUG [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-05T00:51:48,475 DEBUG [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T00:51:48,475 INFO [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T00:51:48,476 DEBUG [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733359908282Running coprocessor pre-close hooks at 1733359908282Disabling compacts and flushes for region at 1733359908282Disabling writes for close at 1733359908282Obtaining lock to block concurrent updates at 1733359908282Preparing flush snapshotting stores in 1588230740 at 1733359908282Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733359908283 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733359908285 (+2 ms)Flushing 1588230740/info: creating writer at 1733359908285Flushing 1588230740/info: appending metadata at 1733359908310 (+25 ms)Flushing 1588230740/info: closing flushed file at 1733359908310Flushing 1588230740/ns: creating writer at 1733359908337 (+27 ms)Flushing 1588230740/ns: appending metadata at 1733359908358 (+21 ms)Flushing 1588230740/ns: closing flushed file at 1733359908358Flushing 1588230740/table: creating writer at 1733359908383 (+25 ms)Flushing 1588230740/table: appending metadata at 1733359908403 (+20 ms)Flushing 1588230740/table: closing flushed file at 1733359908403Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@308febb1: reopening flushed file at 1733359908424 (+21 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@32ecab32: reopening flushed file at 1733359908435 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@d119ef1: reopening flushed file at 1733359908446 (+11 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 177ms, sequenceid=11, compaction requested=false at 1733359908460 (+14 ms)Writing region close event to WAL at 1733359908467 (+7 ms)Running coprocessor post-close hooks at 1733359908475 (+8 ms)Closed at 1733359908475 2024-12-05T00:51:48,476 DEBUG [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-05T00:51:48,483 INFO [RS:1;fea72ea5c4b6:39803 {}] regionserver.HRegionServer(976): stopping server fea72ea5c4b6,39803,1733359903735; all regions closed. 
2024-12-05T00:51:48,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_1073741829_1019 (size=2751) 2024-12-05T00:51:48,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35037 is added to blk_1073741829_1019 (size=2751) 2024-12-05T00:51:48,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35255 is added to blk_1073741829_1019 (size=2751) 2024-12-05T00:51:48,489 DEBUG [RS:1;fea72ea5c4b6:39803 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/oldWALs 2024-12-05T00:51:48,489 INFO [RS:1;fea72ea5c4b6:39803 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL fea72ea5c4b6%2C39803%2C1733359903735.meta:.meta(num 1733359906037) 2024-12-05T00:51:48,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_1073741827_1017 (size=1298) 2024-12-05T00:51:48,493 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/WALs/fea72ea5c4b6,39803,1733359903735/fea72ea5c4b6%2C39803%2C1733359903735.1733359905488 not finished, retry = 0 2024-12-05T00:51:48,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35255 is added to blk_1073741827_1017 (size=1298) 2024-12-05T00:51:48,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35037 is added to blk_1073741827_1017 (size=1298) 2024-12-05T00:51:48,597 DEBUG [RS:1;fea72ea5c4b6:39803 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/oldWALs 2024-12-05T00:51:48,597 INFO [RS:1;fea72ea5c4b6:39803 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL fea72ea5c4b6%2C39803%2C1733359903735:(num 1733359905488) 2024-12-05T00:51:48,597 DEBUG [RS:1;fea72ea5c4b6:39803 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:51:48,597 INFO [RS:1;fea72ea5c4b6:39803 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T00:51:48,597 INFO [RS:1;fea72ea5c4b6:39803 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T00:51:48,597 INFO [RS:1;fea72ea5c4b6:39803 {}] hbase.ChoreService(370): Chore service for: regionserver/fea72ea5c4b6:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T00:51:48,598 INFO [RS:1;fea72ea5c4b6:39803 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T00:51:48,598 INFO [regionserver/fea72ea5c4b6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-05T00:51:48,598 INFO [RS:1;fea72ea5c4b6:39803 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39803 2024-12-05T00:51:48,624 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39803-0x101a2f918a20002, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/fea72ea5c4b6,39803,1733359903735 2024-12-05T00:51:48,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T00:51:48,624 INFO [RS:1;fea72ea5c4b6:39803 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T00:51:48,624 ERROR [pool-71-thread-1-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$373/0x00007f54ec8f8000@c06a831 rejected from java.util.concurrent.ThreadPoolExecutor@490427d5[Shutting down, pool size = 1, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1360) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-12-05T00:51:48,625 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [fea72ea5c4b6,39803,1733359903735] 2024-12-05T00:51:48,645 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/fea72ea5c4b6,39803,1733359903735 already deleted, retry=false 2024-12-05T00:51:48,645 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; fea72ea5c4b6,39803,1733359903735 expired; onlineServers=0 2024-12-05T00:51:48,645 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'fea72ea5c4b6,39757,1733359902807' ***** 2024-12-05T00:51:48,645 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-05T00:51:48,645 INFO [M:0;fea72ea5c4b6:39757 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T00:51:48,645 INFO [M:0;fea72ea5c4b6:39757 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T00:51:48,646 DEBUG [M:0;fea72ea5c4b6:39757 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-05T00:51:48,646 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-05T00:51:48,646 DEBUG [M:0;fea72ea5c4b6:39757 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-05T00:51:48,646 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster-HFileCleaner.large.0-1733359904963 {}] cleaner.HFileCleaner(306): Exit Thread[master/fea72ea5c4b6:0:becomeActiveMaster-HFileCleaner.large.0-1733359904963,5,FailOnTimeoutGroup] 2024-12-05T00:51:48,646 INFO [M:0;fea72ea5c4b6:39757 {}] hbase.ChoreService(370): Chore service for: master/fea72ea5c4b6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-05T00:51:48,646 INFO [M:0;fea72ea5c4b6:39757 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T00:51:48,646 DEBUG [M:0;fea72ea5c4b6:39757 {}] master.HMaster(1795): Stopping service threads 2024-12-05T00:51:48,646 INFO [M:0;fea72ea5c4b6:39757 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-05T00:51:48,646 INFO [M:0;fea72ea5c4b6:39757 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T00:51:48,647 INFO [M:0;fea72ea5c4b6:39757 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-05T00:51:48,647 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster-HFileCleaner.small.0-1733359904964 {}] cleaner.HFileCleaner(306): Exit Thread[master/fea72ea5c4b6:0:becomeActiveMaster-HFileCleaner.small.0-1733359904964,5,FailOnTimeoutGroup] 2024-12-05T00:51:48,647 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-05T00:51:48,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-05T00:51:48,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:48,656 DEBUG [M:0;fea72ea5c4b6:39757 {}] zookeeper.ZKUtil(347): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-05T00:51:48,656 WARN [M:0;fea72ea5c4b6:39757 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-05T00:51:48,657 INFO [M:0;fea72ea5c4b6:39757 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/.lastflushedseqids 2024-12-05T00:51:48,670 WARN [M:0;fea72ea5c4b6:39757 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T00:51:48,670 WARN [M:0;fea72ea5c4b6:39757 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-05T00:51:48,677 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-976733774_22 at /127.0.0.1:52198 [Receiving block BP-467069894-172.17.0.2-1733359898308:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:35255:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52198 dst: /127.0.0.1:35255 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:51:48,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35255 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-05T00:51:48,685 WARN [M:0;fea72ea5c4b6:39757 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T00:51:48,685 INFO [M:0;fea72ea5c4b6:39757 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-05T00:51:48,685 INFO [M:0;fea72ea5c4b6:39757 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-05T00:51:48,685 DEBUG [M:0;fea72ea5c4b6:39757 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T00:51:48,685 INFO [M:0;fea72ea5c4b6:39757 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:51:48,685 DEBUG [M:0;fea72ea5c4b6:39757 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:51:48,685 DEBUG [M:0;fea72ea5c4b6:39757 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T00:51:48,686 DEBUG [M:0;fea72ea5c4b6:39757 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-05T00:51:48,686 INFO [M:0;fea72ea5c4b6:39757 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-12-05T00:51:48,705 DEBUG [M:0;fea72ea5c4b6:39757 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ae2765b49a2e44c7b13694634328b3c5 is 82, key is hbase:meta,,1/info:regioninfo/1733359906113/Put/seqid=0 2024-12-05T00:51:48,707 WARN [M:0;fea72ea5c4b6:39757 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T00:51:48,707 WARN [M:0;fea72ea5c4b6:39757 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T00:51:48,710 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-976733774_22 at /127.0.0.1:52216 [Receiving block BP-467069894-172.17.0.2-1733359898308:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:35255:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52216 dst: /127.0.0.1:35255 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:51:48,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35255 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-05T00:51:48,714 WARN [M:0;fea72ea5c4b6:39757 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-05T00:51:48,715 INFO [M:0;fea72ea5c4b6:39757 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ae2765b49a2e44c7b13694634328b3c5 2024-12-05T00:51:48,735 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39803-0x101a2f918a20002, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:51:48,735 INFO [RS:1;fea72ea5c4b6:39803 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T00:51:48,735 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39803-0x101a2f918a20002, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:51:48,735 INFO [RS:1;fea72ea5c4b6:39803 {}] regionserver.HRegionServer(1031): Exiting; stopping=fea72ea5c4b6,39803,1733359903735; zookeeper connection closed. 2024-12-05T00:51:48,739 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@49ff905c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@49ff905c 2024-12-05T00:51:48,739 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-05T00:51:48,750 DEBUG [M:0;fea72ea5c4b6:39757 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/04bf23b7cb254b7d85fb21da5146cf41 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733359906897/Put/seqid=0 2024-12-05T00:51:48,752 WARN [M:0;fea72ea5c4b6:39757 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T00:51:48,753 WARN [M:0;fea72ea5c4b6:39757 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T00:51:48,755 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-976733774_22 at /127.0.0.1:52236 [Receiving block BP-467069894-172.17.0.2-1733359898308:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:35255:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52236 dst: /127.0.0.1:35255 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:51:48,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35255 is added to blk_-9223372036854775552_1037 (size=6440) 2024-12-05T00:51:48,761 WARN [M:0;fea72ea5c4b6:39757 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T00:51:48,761 INFO [M:0;fea72ea5c4b6:39757 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.15 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/04bf23b7cb254b7d85fb21da5146cf41 2024-12-05T00:51:48,792 DEBUG [M:0;fea72ea5c4b6:39757 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cf1a0e4719ab45fc84629a56dc2df287 is 69, key is fea72ea5c4b6,39803,1733359903735/rs:state/1733359904997/Put/seqid=0 2024-12-05T00:51:48,796 WARN [M:0;fea72ea5c4b6:39757 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T00:51:48,796 WARN [M:0;fea72ea5c4b6:39757 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T00:51:48,800 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-976733774_22 at /127.0.0.1:53520 [Receiving block BP-467069894-172.17.0.2-1733359898308:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:35037:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53520 dst: /127.0.0.1:35037 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:51:48,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35037 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-05T00:51:48,809 WARN [M:0;fea72ea5c4b6:39757 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T00:51:48,810 INFO [M:0;fea72ea5c4b6:39757 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cf1a0e4719ab45fc84629a56dc2df287 2024-12-05T00:51:48,820 DEBUG [M:0;fea72ea5c4b6:39757 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ae2765b49a2e44c7b13694634328b3c5 as hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ae2765b49a2e44c7b13694634328b3c5 2024-12-05T00:51:48,828 INFO [M:0;fea72ea5c4b6:39757 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ae2765b49a2e44c7b13694634328b3c5, entries=8, sequenceid=72, filesize=5.5 K 2024-12-05T00:51:48,830 DEBUG [M:0;fea72ea5c4b6:39757 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/04bf23b7cb254b7d85fb21da5146cf41 as hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/04bf23b7cb254b7d85fb21da5146cf41 2024-12-05T00:51:48,840 INFO [M:0;fea72ea5c4b6:39757 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/04bf23b7cb254b7d85fb21da5146cf41, entries=8, sequenceid=72, filesize=6.3 K 2024-12-05T00:51:48,842 DEBUG [M:0;fea72ea5c4b6:39757 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cf1a0e4719ab45fc84629a56dc2df287 as hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/cf1a0e4719ab45fc84629a56dc2df287 2024-12-05T00:51:48,852 INFO [M:0;fea72ea5c4b6:39757 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/cf1a0e4719ab45fc84629a56dc2df287, entries=3, sequenceid=72, filesize=5.2 K 2024-12-05T00:51:48,854 INFO [M:0;fea72ea5c4b6:39757 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 167ms, sequenceid=72, compaction requested=false 2024-12-05T00:51:48,855 INFO [M:0;fea72ea5c4b6:39757 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:51:48,855 DEBUG [M:0;fea72ea5c4b6:39757 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733359908685Disabling compacts and flushes for region at 1733359908685Disabling writes for close at 1733359908685Obtaining lock to block concurrent updates at 1733359908686 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733359908686Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27480, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1733359908686Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733359908688 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733359908688Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733359908704 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733359908705 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733359908721 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733359908749 (+28 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733359908749Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733359908771 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733359908791 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733359908791Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@45d4f426: reopening flushed file at 1733359908818 (+27 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4956bc85: reopening flushed file at 1733359908828 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@25bdc7e2: reopening flushed file at 1733359908840 (+12 ms)Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 167ms, sequenceid=72, compaction requested=false at 1733359908854 (+14 ms)Writing region close event to WAL at 1733359908855 (+1 ms)Closed at 1733359908855 2024-12-05T00:51:48,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35255 is added to blk_1073741825_1011 (size=32683) 2024-12-05T00:51:48,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35037 is added to blk_1073741825_1011 (size=32683) 2024-12-05T00:51:48,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_1073741825_1011 (size=32683) 2024-12-05T00:51:48,861 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-05T00:51:48,861 INFO [M:0;fea72ea5c4b6:39757 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-05T00:51:48,861 INFO [M:0;fea72ea5c4b6:39757 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39757 2024-12-05T00:51:48,862 INFO [M:0;fea72ea5c4b6:39757 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T00:51:48,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:51:48,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39757-0x101a2f918a20000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:51:48,998 INFO [M:0;fea72ea5c4b6:39757 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T00:51:49,004 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@67fa62aa{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:51:49,007 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3e9f3a79{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:51:49,007 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:51:49,007 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@41dce2a2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:51:49,007 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@435daa1b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/hadoop.log.dir/,STOPPED} 2024-12-05T00:51:49,010 WARN [BP-467069894-172.17.0.2-1733359898308 heartbeating to localhost/127.0.0.1:40131 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T00:51:49,010 WARN [BP-467069894-172.17.0.2-1733359898308 heartbeating to localhost/127.0.0.1:40131 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-467069894-172.17.0.2-1733359898308 (Datanode Uuid 87464b85-7c71-4e59-b28b-53e7b0e0c330) service to localhost/127.0.0.1:40131 2024-12-05T00:51:49,012 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/cluster_7a71daaa-e83f-6826-a5c3-b12584b293e3/data/data5/current/BP-467069894-172.17.0.2-1733359898308 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:51:49,012 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T00:51:49,012 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/cluster_7a71daaa-e83f-6826-a5c3-b12584b293e3/data/data6/current/BP-467069894-172.17.0.2-1733359898308 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:51:49,012 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T00:51:49,013 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T00:51:49,018 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@41033a80{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:51:49,018 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@14721f03{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:51:49,018 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:51:49,018 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28ffdd72{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:51:49,018 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@23e84c60{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/hadoop.log.dir/,STOPPED} 2024-12-05T00:51:49,020 WARN [BP-467069894-172.17.0.2-1733359898308 heartbeating to localhost/127.0.0.1:40131 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T00:51:49,020 WARN [BP-467069894-172.17.0.2-1733359898308 heartbeating to localhost/127.0.0.1:40131 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-467069894-172.17.0.2-1733359898308 (Datanode Uuid b12e04d0-6876-4bff-ad8b-ddc923f4168b) service to localhost/127.0.0.1:40131 2024-12-05T00:51:49,021 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/cluster_7a71daaa-e83f-6826-a5c3-b12584b293e3/data/data3/current/BP-467069894-172.17.0.2-1733359898308 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:51:49,021 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/cluster_7a71daaa-e83f-6826-a5c3-b12584b293e3/data/data4/current/BP-467069894-172.17.0.2-1733359898308 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:51:49,021 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T00:51:49,021 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T00:51:49,022 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T00:51:49,024 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6bf2c732{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:51:49,024 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1182e874{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:51:49,024 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:51:49,024 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a906869{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:51:49,025 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7728820b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/hadoop.log.dir/,STOPPED} 2024-12-05T00:51:49,026 WARN [BP-467069894-172.17.0.2-1733359898308 heartbeating to localhost/127.0.0.1:40131 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T00:51:49,026 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T00:51:49,026 WARN [BP-467069894-172.17.0.2-1733359898308 heartbeating to localhost/127.0.0.1:40131 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-467069894-172.17.0.2-1733359898308 (Datanode Uuid a3657a6b-75ae-488c-baff-06e397515557) service to localhost/127.0.0.1:40131 2024-12-05T00:51:49,026 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T00:51:49,027 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/cluster_7a71daaa-e83f-6826-a5c3-b12584b293e3/data/data1/current/BP-467069894-172.17.0.2-1733359898308 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:51:49,027 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/cluster_7a71daaa-e83f-6826-a5c3-b12584b293e3/data/data2/current/BP-467069894-172.17.0.2-1733359898308 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:51:49,027 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T00:51:49,037 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e22261{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-05T00:51:49,038 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3599471c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:51:49,038 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:51:49,038 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2faf2775{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:51:49,038 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e18bd18{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/hadoop.log.dir/,STOPPED} 2024-12-05T00:51:49,048 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-05T00:51:49,079 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-05T00:51:49,086 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=87 (was 157), OpenFileDescriptor=441 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=679 (was 668) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=10837 (was 9972) - AvailableMemoryMB LEAK? 
- 2024-12-05T00:51:49,091 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=87, OpenFileDescriptor=441, MaxFileDescriptor=1048576, SystemLoadAverage=679, ProcessCount=11, AvailableMemoryMB=10836 2024-12-05T00:51:49,091 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-05T00:51:49,091 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/hadoop.log.dir so I do NOT create it in target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83 2024-12-05T00:51:49,091 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6110c17f-7b21-a3ac-1fc8-06b3e57cfe29/hadoop.tmp.dir so I do NOT create it in target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83 2024-12-05T00:51:49,092 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/cluster_a83fee05-1ec3-a553-1a88-5d67517fd4a7, deleteOnExit=true 2024-12-05T00:51:49,092 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-05T00:51:49,092 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/test.cache.data in system properties and HBase conf 2024-12-05T00:51:49,092 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/hadoop.tmp.dir in system properties and HBase conf 2024-12-05T00:51:49,092 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/hadoop.log.dir in system properties and HBase conf 2024-12-05T00:51:49,092 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-05T00:51:49,092 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-05T00:51:49,092 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-05T00:51:49,092 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-05T00:51:49,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-05T00:51:49,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-05T00:51:49,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-05T00:51:49,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T00:51:49,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-05T00:51:49,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-05T00:51:49,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T00:51:49,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T00:51:49,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-05T00:51:49,094 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/nfs.dump.dir in system properties and HBase conf 2024-12-05T00:51:49,094 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/java.io.tmpdir in system properties and HBase conf 2024-12-05T00:51:49,094 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T00:51:49,094 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-05T00:51:49,094 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-05T00:51:49,488 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:51:49,496 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:51:49,502 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:51:49,502 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:51:49,503 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T00:51:49,504 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:51:49,507 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7005d2e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:51:49,508 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c410e4c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:51:49,639 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7826e44{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/java.io.tmpdir/jetty-localhost-34557-hadoop-hdfs-3_4_1-tests_jar-_-any-17070724672572850813/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-05T00:51:49,640 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@69e41ac7{HTTP/1.1, (http/1.1)}{localhost:34557} 2024-12-05T00:51:49,641 INFO [Time-limited test {}] server.Server(415): Started @13425ms 2024-12-05T00:51:49,958 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:51:49,963 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:51:49,964 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:51:49,964 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:51:49,964 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-05T00:51:49,965 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45d5becd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:51:49,966 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@592a8291{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:51:50,064 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@776d8022{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/java.io.tmpdir/jetty-localhost-44803-hadoop-hdfs-3_4_1-tests_jar-_-any-7564807930876510609/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:51:50,064 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@5ffd79d0{HTTP/1.1, (http/1.1)}{localhost:44803} 2024-12-05T00:51:50,064 INFO [Time-limited test {}] server.Server(415): Started @13848ms 2024-12-05T00:51:50,066 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T00:51:50,132 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:51:50,137 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:51:50,145 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:51:50,145 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:51:50,145 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T00:51:50,146 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45e96b0c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:51:50,146 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@58778b20{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:51:50,268 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62356e52{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/java.io.tmpdir/jetty-localhost-41143-hadoop-hdfs-3_4_1-tests_jar-_-any-10621885558738980872/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:51:50,269 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@259f19e7{HTTP/1.1, (http/1.1)}{localhost:41143} 2024-12-05T00:51:50,269 INFO [Time-limited test {}] server.Server(415): Started @14053ms 2024-12-05T00:51:50,271 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T00:51:50,313 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:51:50,317 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:51:50,320 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:51:50,320 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:51:50,320 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-05T00:51:50,320 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65c795a1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:51:50,321 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@67530df7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:51:50,456 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2735ddad{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/java.io.tmpdir/jetty-localhost-39547-hadoop-hdfs-3_4_1-tests_jar-_-any-17581245671499674789/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:51:50,457 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@387982f9{HTTP/1.1, (http/1.1)}{localhost:39547} 2024-12-05T00:51:50,457 INFO [Time-limited test {}] server.Server(415): Started @14241ms 2024-12-05T00:51:50,459 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T00:51:51,572 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-05T00:51:51,579 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:51:51,616 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:51:51,617 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T00:51:51,645 WARN [Thread-564 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/cluster_a83fee05-1ec3-a553-1a88-5d67517fd4a7/data/data1/current/BP-1172660541-172.17.0.2-1733359909120/current, will proceed with Du for space computation calculation, 2024-12-05T00:51:51,645 WARN [Thread-565 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/cluster_a83fee05-1ec3-a553-1a88-5d67517fd4a7/data/data2/current/BP-1172660541-172.17.0.2-1733359909120/current, will proceed with Du for space computation calculation, 2024-12-05T00:51:51,677 WARN [Thread-504 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-05T00:51:51,681 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf0c928a1ae57437e with lease ID 0xea083b6dc5a4ca11: Processing first storage report for DS-76dfe2ad-cf08-4125-ae26-65f43fa24c49 from datanode DatanodeRegistration(127.0.0.1:45923, datanodeUuid=b223ef43-9ccf-41d0-9f38-6606b0c952ff, infoPort=36055, infoSecurePort=0, ipcPort=44805, storageInfo=lv=-57;cid=testClusterID;nsid=2098677126;c=1733359909120) 2024-12-05T00:51:51,682 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf0c928a1ae57437e with lease ID 0xea083b6dc5a4ca11: from storage DS-76dfe2ad-cf08-4125-ae26-65f43fa24c49 node DatanodeRegistration(127.0.0.1:45923, datanodeUuid=b223ef43-9ccf-41d0-9f38-6606b0c952ff, infoPort=36055, infoSecurePort=0, ipcPort=44805, storageInfo=lv=-57;cid=testClusterID;nsid=2098677126;c=1733359909120), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:51:51,682 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf0c928a1ae57437e with lease ID 0xea083b6dc5a4ca11: Processing first storage report for DS-fed7ea9d-1f9d-423d-bb55-bb74082eaf1c from datanode DatanodeRegistration(127.0.0.1:45923, datanodeUuid=b223ef43-9ccf-41d0-9f38-6606b0c952ff, infoPort=36055, infoSecurePort=0, ipcPort=44805, storageInfo=lv=-57;cid=testClusterID;nsid=2098677126;c=1733359909120) 2024-12-05T00:51:51,682 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf0c928a1ae57437e with lease ID 0xea083b6dc5a4ca11: from storage DS-fed7ea9d-1f9d-423d-bb55-bb74082eaf1c node DatanodeRegistration(127.0.0.1:45923, datanodeUuid=b223ef43-9ccf-41d0-9f38-6606b0c952ff, infoPort=36055, infoSecurePort=0, ipcPort=44805, storageInfo=lv=-57;cid=testClusterID;nsid=2098677126;c=1733359909120), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:51:51,993 WARN [Thread-576 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/cluster_a83fee05-1ec3-a553-1a88-5d67517fd4a7/data/data3/current/BP-1172660541-172.17.0.2-1733359909120/current, will proceed with Du for space computation calculation, 2024-12-05T00:51:51,994 WARN [Thread-577 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/cluster_a83fee05-1ec3-a553-1a88-5d67517fd4a7/data/data4/current/BP-1172660541-172.17.0.2-1733359909120/current, will proceed with Du for space computation calculation, 2024-12-05T00:51:52,029 WARN [Thread-527 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-05T00:51:52,035 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbf2a4370751424b6 with lease ID 0xea083b6dc5a4ca12: Processing first storage report for DS-1dab13a6-2bbf-4f00-91a6-6407c9166c89 from datanode DatanodeRegistration(127.0.0.1:42605, datanodeUuid=e9305cf5-98e7-4f7c-8bef-c4fd47e37393, infoPort=35069, infoSecurePort=0, ipcPort=40933, storageInfo=lv=-57;cid=testClusterID;nsid=2098677126;c=1733359909120) 2024-12-05T00:51:52,035 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbf2a4370751424b6 with lease ID 0xea083b6dc5a4ca12: from storage DS-1dab13a6-2bbf-4f00-91a6-6407c9166c89 node DatanodeRegistration(127.0.0.1:42605, datanodeUuid=e9305cf5-98e7-4f7c-8bef-c4fd47e37393, infoPort=35069, infoSecurePort=0, ipcPort=40933, storageInfo=lv=-57;cid=testClusterID;nsid=2098677126;c=1733359909120), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:51:52,035 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbf2a4370751424b6 with lease ID 0xea083b6dc5a4ca12: Processing first storage report for DS-0bc17a16-1142-45ad-8b89-f6ac91df97c0 from datanode DatanodeRegistration(127.0.0.1:42605, datanodeUuid=e9305cf5-98e7-4f7c-8bef-c4fd47e37393, infoPort=35069, infoSecurePort=0, ipcPort=40933, storageInfo=lv=-57;cid=testClusterID;nsid=2098677126;c=1733359909120) 2024-12-05T00:51:52,035 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbf2a4370751424b6 with lease ID 0xea083b6dc5a4ca12: from storage DS-0bc17a16-1142-45ad-8b89-f6ac91df97c0 node DatanodeRegistration(127.0.0.1:42605, datanodeUuid=e9305cf5-98e7-4f7c-8bef-c4fd47e37393, infoPort=35069, infoSecurePort=0, ipcPort=40933, storageInfo=lv=-57;cid=testClusterID;nsid=2098677126;c=1733359909120), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:51:52,135 WARN [Thread-588 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/cluster_a83fee05-1ec3-a553-1a88-5d67517fd4a7/data/data6/current/BP-1172660541-172.17.0.2-1733359909120/current, will proceed with Du for space computation calculation, 2024-12-05T00:51:52,135 WARN [Thread-587 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/cluster_a83fee05-1ec3-a553-1a88-5d67517fd4a7/data/data5/current/BP-1172660541-172.17.0.2-1733359909120/current, will proceed with Du for space computation calculation, 2024-12-05T00:51:52,174 WARN [Thread-549 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-05T00:51:52,179 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x68c74ce41656b0aa with lease ID 0xea083b6dc5a4ca13: Processing first storage report for DS-22a703c4-3477-4b1a-be21-bf208daff0b0 from datanode DatanodeRegistration(127.0.0.1:46297, datanodeUuid=0bb39949-6f35-4273-82fb-1d105e28b9d2, infoPort=39709, infoSecurePort=0, ipcPort=36623, storageInfo=lv=-57;cid=testClusterID;nsid=2098677126;c=1733359909120) 2024-12-05T00:51:52,179 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x68c74ce41656b0aa with lease ID 0xea083b6dc5a4ca13: from storage DS-22a703c4-3477-4b1a-be21-bf208daff0b0 node DatanodeRegistration(127.0.0.1:46297, datanodeUuid=0bb39949-6f35-4273-82fb-1d105e28b9d2, infoPort=39709, infoSecurePort=0, ipcPort=36623, storageInfo=lv=-57;cid=testClusterID;nsid=2098677126;c=1733359909120), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:51:52,179 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x68c74ce41656b0aa with lease ID 0xea083b6dc5a4ca13: Processing first storage report for DS-0718104d-9f21-4298-aa51-94f6e5776f98 from datanode DatanodeRegistration(127.0.0.1:46297, datanodeUuid=0bb39949-6f35-4273-82fb-1d105e28b9d2, infoPort=39709, infoSecurePort=0, ipcPort=36623, storageInfo=lv=-57;cid=testClusterID;nsid=2098677126;c=1733359909120) 2024-12-05T00:51:52,179 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x68c74ce41656b0aa with lease ID 0xea083b6dc5a4ca13: from storage DS-0718104d-9f21-4298-aa51-94f6e5776f98 node DatanodeRegistration(127.0.0.1:46297, datanodeUuid=0bb39949-6f35-4273-82fb-1d105e28b9d2, infoPort=39709, infoSecurePort=0, ipcPort=36623, storageInfo=lv=-57;cid=testClusterID;nsid=2098677126;c=1733359909120), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:51:52,223 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83 2024-12-05T00:51:52,232 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/cluster_a83fee05-1ec3-a553-1a88-5d67517fd4a7/zookeeper_0, clientPort=59194, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/cluster_a83fee05-1ec3-a553-1a88-5d67517fd4a7/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/cluster_a83fee05-1ec3-a553-1a88-5d67517fd4a7/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-05T00:51:52,233 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59194 2024-12-05T00:51:52,233 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:51:52,235 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:51:52,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46297 is added to blk_1073741825_1001 (size=7) 2024-12-05T00:51:52,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45923 is added to blk_1073741825_1001 (size=7) 2024-12-05T00:51:52,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42605 is added to blk_1073741825_1001 (size=7) 2024-12-05T00:51:52,255 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7 with version=8 2024-12-05T00:51:52,255 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40131/user/jenkins/test-data/b1f8ac30-b5c5-600f-cbd4-4bc6618fc693/hbase-staging 2024-12-05T00:51:52,257 INFO [Time-limited test {}] client.ConnectionUtils(128): master/fea72ea5c4b6:0 server-side Connection retries=45 2024-12-05T00:51:52,257 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:51:52,257 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T00:51:52,257 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T00:51:52,257 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:51:52,257 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T00:51:52,257 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-05T00:51:52,257 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T00:51:52,258 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32923 2024-12-05T00:51:52,259 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:32923 connecting to ZooKeeper ensemble=127.0.0.1:59194 2024-12-05T00:51:52,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:329230x0, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T00:51:52,361 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:32923-0x101a2f940cc0000 connected 2024-12-05T00:51:52,594 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block 
reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:51:52,596 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:51:52,598 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:51:52,598 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7, hbase.cluster.distributed=false 2024-12-05T00:51:52,600 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T00:51:52,601 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32923 2024-12-05T00:51:52,601 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32923 2024-12-05T00:51:52,602 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32923 2024-12-05T00:51:52,602 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32923 2024-12-05T00:51:52,602 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32923 2024-12-05T00:51:52,622 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/fea72ea5c4b6:0 server-side Connection retries=45 2024-12-05T00:51:52,622 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:51:52,622 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T00:51:52,622 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T00:51:52,622 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:51:52,622 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T00:51:52,623 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T00:51:52,623 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T00:51:52,624 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39187 2024-12-05T00:51:52,626 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39187 connecting to ZooKeeper ensemble=127.0.0.1:59194 2024-12-05T00:51:52,627 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:51:52,630 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:51:52,644 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:391870x0, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T00:51:52,645 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39187-0x101a2f940cc0001 connected 2024-12-05T00:51:52,645 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39187-0x101a2f940cc0001, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:51:52,645 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-05T00:51:52,648 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T00:51:52,649 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39187-0x101a2f940cc0001, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T00:51:52,651 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39187-0x101a2f940cc0001, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T00:51:52,652 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39187 2024-12-05T00:51:52,652 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39187 2024-12-05T00:51:52,653 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39187 2024-12-05T00:51:52,654 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39187 2024-12-05T00:51:52,655 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39187 2024-12-05T00:51:52,675 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/fea72ea5c4b6:0 server-side Connection retries=45 2024-12-05T00:51:52,675 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:51:52,675 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T00:51:52,676 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T00:51:52,676 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:51:52,676 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T00:51:52,676 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T00:51:52,676 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T00:51:52,677 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34267 2024-12-05T00:51:52,679 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34267 connecting to ZooKeeper ensemble=127.0.0.1:59194 2024-12-05T00:51:52,680 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:51:52,681 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:51:52,697 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:342670x0, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T00:51:52,697 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:342670x0, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:51:52,697 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34267-0x101a2f940cc0002 connected 2024-12-05T00:51:52,698 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-05T00:51:52,698 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T00:51:52,699 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34267-0x101a2f940cc0002, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T00:51:52,700 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34267-0x101a2f940cc0002, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T00:51:52,704 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34267 2024-12-05T00:51:52,704 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34267 2024-12-05T00:51:52,707 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34267 2024-12-05T00:51:52,710 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34267 2024-12-05T00:51:52,710 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with 
threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34267 2024-12-05T00:51:52,726 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/fea72ea5c4b6:0 server-side Connection retries=45 2024-12-05T00:51:52,726 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:51:52,726 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T00:51:52,726 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T00:51:52,726 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:51:52,726 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T00:51:52,726 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T00:51:52,727 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T00:51:52,727 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35921 2024-12-05T00:51:52,729 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35921 connecting to ZooKeeper ensemble=127.0.0.1:59194 2024-12-05T00:51:52,730 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:51:52,731 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:51:52,742 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:359210x0, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T00:51:52,742 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35921-0x101a2f940cc0003, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:51:52,742 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35921-0x101a2f940cc0003 connected 2024-12-05T00:51:52,743 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-05T00:51:52,747 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T00:51:52,748 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35921-0x101a2f940cc0003, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T00:51:52,749 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): regionserver:35921-0x101a2f940cc0003, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T00:51:52,753 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35921 2024-12-05T00:51:52,754 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35921 2024-12-05T00:51:52,754 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35921 2024-12-05T00:51:52,755 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35921 2024-12-05T00:51:52,755 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35921 2024-12-05T00:51:52,765 DEBUG [M:0;fea72ea5c4b6:32923 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;fea72ea5c4b6:32923 2024-12-05T00:51:52,766 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/fea72ea5c4b6,32923,1733359912257 2024-12-05T00:51:52,773 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39187-0x101a2f940cc0001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:51:52,773 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x101a2f940cc0003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:51:52,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:51:52,773 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34267-0x101a2f940cc0002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:51:52,775 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/fea72ea5c4b6,32923,1733359912257 2024-12-05T00:51:52,784 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39187-0x101a2f940cc0001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T00:51:52,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:52,784 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34267-0x101a2f940cc0002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T00:51:52,784 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x101a2f940cc0003, 
quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T00:51:52,784 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34267-0x101a2f940cc0002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:52,784 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x101a2f940cc0003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:52,784 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39187-0x101a2f940cc0001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:52,785 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-05T00:51:52,786 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/fea72ea5c4b6,32923,1733359912257 from backup master directory 2024-12-05T00:51:52,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/fea72ea5c4b6,32923,1733359912257 2024-12-05T00:51:52,794 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34267-0x101a2f940cc0002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:51:52,794 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39187-0x101a2f940cc0001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:51:52,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:51:52,794 WARN [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-05T00:51:52,794 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x101a2f940cc0003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:51:52,794 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=fea72ea5c4b6,32923,1733359912257 2024-12-05T00:51:52,802 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/hbase.id] with ID: ab3adf63-3ffe-4f88-a021-0995f9acb5af 2024-12-05T00:51:52,802 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/.tmp/hbase.id 2024-12-05T00:51:52,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46297 is added to blk_1073741826_1002 (size=42) 2024-12-05T00:51:52,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42605 is added to blk_1073741826_1002 (size=42) 2024-12-05T00:51:52,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45923 is added to blk_1073741826_1002 (size=42) 2024-12-05T00:51:52,819 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/.tmp/hbase.id]:[hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/hbase.id] 2024-12-05T00:51:52,839 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:51:52,840 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-05T00:51:52,842 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-12-05T00:51:52,854 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x101a2f940cc0003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:52,854 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39187-0x101a2f940cc0001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:52,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:52,857 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34267-0x101a2f940cc0002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:52,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45923 is added to blk_1073741827_1003 (size=196) 2024-12-05T00:51:52,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46297 is added to blk_1073741827_1003 (size=196) 2024-12-05T00:51:52,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42605 is added to blk_1073741827_1003 (size=196) 2024-12-05T00:51:52,873 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T00:51:52,874 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-05T00:51:52,879 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T00:51:52,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46297 is 
added to blk_1073741828_1004 (size=1189) 2024-12-05T00:51:52,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42605 is added to blk_1073741828_1004 (size=1189) 2024-12-05T00:51:52,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45923 is added to blk_1073741828_1004 (size=1189) 2024-12-05T00:51:52,898 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/MasterData/data/master/store 2024-12-05T00:51:52,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46297 is added to blk_1073741829_1005 (size=34) 2024-12-05T00:51:52,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45923 is added to blk_1073741829_1005 (size=34) 2024-12-05T00:51:52,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42605 is added to blk_1073741829_1005 (size=34) 2024-12-05T00:51:52,916 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:51:52,916 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T00:51:52,916 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:51:52,916 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
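[Editor's note] The master:store descriptor dumped above (families info, proc, rs and state, with per-family VERSIONS, IN_MEMORY, BLOCKSIZE, DATA_BLOCK_ENCODING and BLOOMFILTER settings) has the same shape the public HBase 2.x client builders produce. A hypothetical sketch of two of those families through the public builder API, just to make the dumped attributes concrete — this is not the MasterRegion bootstrap code:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
    public static void main(String[] args) {
        // Mirrors the 'info' family: 3 versions, in-memory, 8 KB blocks, ROW_INDEX_V1 encoding, ROWCOL bloom.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .build();
        // Mirrors the 'proc' family: 1 version, 64 KB blocks, no encoding, ROW bloom.
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBlocksize(64 * 1024)
            .setDataBlockEncoding(DataBlockEncoding.NONE)
            .setBloomFilterType(BloomType.ROW)
            .build();
        TableDescriptor store = TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .build();
        System.out.println(store); // prints a descriptor string much like the log entries above
    }
}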
2024-12-05T00:51:52,916 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T00:51:52,916 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:51:52,916 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:51:52,916 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733359912916Disabling compacts and flushes for region at 1733359912916Disabling writes for close at 1733359912916Writing region close event to WAL at 1733359912916Closed at 1733359912916 2024-12-05T00:51:52,918 WARN [master/fea72ea5c4b6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/MasterData/data/master/store/.initializing 2024-12-05T00:51:52,918 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/MasterData/WALs/fea72ea5c4b6,32923,1733359912257 2024-12-05T00:51:52,922 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fea72ea5c4b6%2C32923%2C1733359912257, suffix=, logDir=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/MasterData/WALs/fea72ea5c4b6,32923,1733359912257, archiveDir=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/MasterData/oldWALs, maxLogs=10 2024-12-05T00:51:52,922 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor fea72ea5c4b6%2C32923%2C1733359912257.1733359912922 2024-12-05T00:51:52,934 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/MasterData/WALs/fea72ea5c4b6,32923,1733359912257/fea72ea5c4b6%2C32923%2C1733359912257.1733359912922 2024-12-05T00:51:52,936 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35069:35069),(127.0.0.1/127.0.0.1:36055:36055),(127.0.0.1/127.0.0.1:39709:39709)] 2024-12-05T00:51:52,938 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-05T00:51:52,938 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:51:52,938 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:51:52,938 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:51:52,941 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:51:52,943 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-05T00:51:52,943 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:52,944 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:51:52,944 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:51:52,945 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-05T00:51:52,946 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:52,946 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:51:52,946 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:51:52,949 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-05T00:51:52,950 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:52,950 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:51:52,950 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:51:52,952 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-05T00:51:52,953 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:52,953 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:51:52,953 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:51:52,954 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:51:52,955 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:51:52,957 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:51:52,957 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:51:52,957 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T00:51:52,959 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:51:52,961 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T00:51:52,962 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74077920, jitterRate=0.10384702682495117}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T00:51:52,963 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733359912939Initializing all the Stores at 1733359912940 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733359912940Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733359912941 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733359912941Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733359912941Cleaning up temporary data from old regions at 1733359912957 (+16 ms)Region opened successfully at 1733359912963 (+6 ms) 2024-12-05T00:51:52,963 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-05T00:51:52,968 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61620ab6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=fea72ea5c4b6/172.17.0.2:0 2024-12-05T00:51:52,969 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-05T00:51:52,969 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-05T00:51:52,969 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-05T00:51:52,970 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-05T00:51:52,970 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-05T00:51:52,971 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-05T00:51:52,971 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-05T00:51:52,974 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
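[Editor's note] As a quick cross-check of the split-policy entry a few lines up (desiredMaxFileSize=74077920, jitterRate=0.10384702682495117) and the matching hbase:meta entry later in this excerpt (desiredMaxFileSize=68602533, jitterRate=0.022257402539253235): both values are consistent with a 64 MB base size scaled by (1 + jitterRate). The 64 MB base is inferred from the numbers, not stated in the log, so this is only a sanity check, not the policy's actual code:

public class SplitSizeCheck {
    public static void main(String[] args) {
        long assumedBase = 64L * 1024 * 1024; // 67108864 bytes; an inference, not a value from the log
        System.out.println(Math.round(assumedBase * (1 + 0.10384702682495117)));  // 74077920
        System.out.println(Math.round(assumedBase * (1 + 0.022257402539253235))); // 68602533
    }
}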
2024-12-05T00:51:52,975 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-05T00:51:52,983 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-05T00:51:52,984 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-05T00:51:52,985 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-05T00:51:52,994 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-05T00:51:52,995 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-05T00:51:52,996 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-05T00:51:53,004 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-05T00:51:53,005 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-05T00:51:53,015 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-05T00:51:53,018 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-05T00:51:53,026 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-05T00:51:53,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T00:51:53,036 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34267-0x101a2f940cc0002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T00:51:53,036 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39187-0x101a2f940cc0001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T00:51:53,036 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x101a2f940cc0003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-05T00:51:53,036 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x101a2f940cc0003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:53,036 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34267-0x101a2f940cc0002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:53,036 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39187-0x101a2f940cc0001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:53,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:53,037 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=fea72ea5c4b6,32923,1733359912257, sessionid=0x101a2f940cc0000, setting cluster-up flag (Was=false) 2024-12-05T00:51:53,057 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34267-0x101a2f940cc0002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:53,057 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39187-0x101a2f940cc0001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:53,057 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x101a2f940cc0003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:53,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:53,089 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-05T00:51:53,091 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=fea72ea5c4b6,32923,1733359912257 2024-12-05T00:51:53,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:53,110 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34267-0x101a2f940cc0002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:53,110 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x101a2f940cc0003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:53,111 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:39187-0x101a2f940cc0001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:53,141 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-05T00:51:53,143 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=fea72ea5c4b6,32923,1733359912257 2024-12-05T00:51:53,145 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-05T00:51:53,146 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-05T00:51:53,147 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-05T00:51:53,150 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-05T00:51:53,150 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-05T00:51:53,151 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-05T00:51:53,151 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: fea72ea5c4b6,32923,1733359912257 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-05T00:51:53,152 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/fea72ea5c4b6:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:51:53,152 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/fea72ea5c4b6:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:51:53,153 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/fea72ea5c4b6:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:51:53,153 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/fea72ea5c4b6:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:51:53,153 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/fea72ea5c4b6:0, corePoolSize=10, maxPoolSize=10 2024-12-05T00:51:53,153 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,153 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/fea72ea5c4b6:0, corePoolSize=2, maxPoolSize=2 2024-12-05T00:51:53,153 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,154 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733359943154 2024-12-05T00:51:53,154 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-05T00:51:53,154 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-05T00:51:53,154 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-05T00:51:53,154 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-05T00:51:53,154 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-05T00:51:53,154 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-05T00:51:53,154 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:53,155 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-05T00:51:53,155 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-05T00:51:53,155 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-05T00:51:53,155 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:51:53,155 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-05T00:51:53,156 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-05T00:51:53,156 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-05T00:51:53,156 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/fea72ea5c4b6:0:becomeActiveMaster-HFileCleaner.large.0-1733359913156,5,FailOnTimeoutGroup] 2024-12-05T00:51:53,157 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/fea72ea5c4b6:0:becomeActiveMaster-HFileCleaner.small.0-1733359913156,5,FailOnTimeoutGroup] 2024-12-05T00:51:53,157 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:53,157 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-05T00:51:53,157 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:53,157 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
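[Editor's note] The ChoreService entries above simply schedule periodic maintenance tasks (LogsCleaner and HFileCleaner every 600000 ms, SnapshotCleaner every 1800000 ms, ReplicationBarrierCleaner every 43200000 ms). As a rough, hypothetical analogy only — not HBase's ChoreService/ScheduledChore implementation — the same shape in plain Java looks like this:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreSketch {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService chores = Executors.newScheduledThreadPool(2);
        // Analogue of "ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS".
        chores.scheduleAtFixedRate(() -> System.out.println("LogsCleaner: placeholder work"),
            0, 600_000, TimeUnit.MILLISECONDS);
        // Analogue of "ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS".
        chores.scheduleAtFixedRate(() -> System.out.println("SnapshotCleaner: placeholder work"),
            0, 1_800_000, TimeUnit.MILLISECONDS);
        Thread.sleep(5_000);   // let the first runs fire, then stop the demo
        chores.shutdown();
    }
}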
2024-12-05T00:51:53,157 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:53,157 INFO [RS:1;fea72ea5c4b6:34267 {}] regionserver.HRegionServer(746): ClusterId : ab3adf63-3ffe-4f88-a021-0995f9acb5af 2024-12-05T00:51:53,157 DEBUG [RS:1;fea72ea5c4b6:34267 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T00:51:53,157 INFO [RS:0;fea72ea5c4b6:39187 {}] regionserver.HRegionServer(746): ClusterId : ab3adf63-3ffe-4f88-a021-0995f9acb5af 2024-12-05T00:51:53,157 DEBUG [RS:0;fea72ea5c4b6:39187 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T00:51:53,157 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-05T00:51:53,163 INFO [RS:2;fea72ea5c4b6:35921 {}] regionserver.HRegionServer(746): ClusterId : ab3adf63-3ffe-4f88-a021-0995f9acb5af 2024-12-05T00:51:53,163 DEBUG [RS:2;fea72ea5c4b6:35921 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T00:51:53,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46297 is added to blk_1073741831_1007 (size=1321) 2024-12-05T00:51:53,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42605 is added to blk_1073741831_1007 (size=1321) 2024-12-05T00:51:53,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45923 is added to blk_1073741831_1007 (size=1321) 2024-12-05T00:51:53,170 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-05T00:51:53,170 INFO [PEWorker-1 {}] regionserver.HRegion(7572): 
creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7 2024-12-05T00:51:53,173 DEBUG [RS:1;fea72ea5c4b6:34267 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T00:51:53,173 DEBUG [RS:1;fea72ea5c4b6:34267 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T00:51:53,173 DEBUG [RS:0;fea72ea5c4b6:39187 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T00:51:53,174 DEBUG [RS:0;fea72ea5c4b6:39187 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T00:51:53,174 DEBUG [RS:2;fea72ea5c4b6:35921 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T00:51:53,174 DEBUG [RS:2;fea72ea5c4b6:35921 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T00:51:53,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46297 is added to blk_1073741832_1008 (size=32) 2024-12-05T00:51:53,185 DEBUG [RS:1;fea72ea5c4b6:34267 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T00:51:53,185 DEBUG [RS:0;fea72ea5c4b6:39187 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T00:51:53,185 DEBUG [RS:0;fea72ea5c4b6:39187 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1202702, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=fea72ea5c4b6/172.17.0.2:0 2024-12-05T00:51:53,186 DEBUG [RS:1;fea72ea5c4b6:34267 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f714cac, 
compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=fea72ea5c4b6/172.17.0.2:0 2024-12-05T00:51:53,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42605 is added to blk_1073741832_1008 (size=32) 2024-12-05T00:51:53,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45923 is added to blk_1073741832_1008 (size=32) 2024-12-05T00:51:53,187 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:51:53,187 DEBUG [RS:2;fea72ea5c4b6:35921 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T00:51:53,188 DEBUG [RS:2;fea72ea5c4b6:35921 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a5c18fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=fea72ea5c4b6/172.17.0.2:0 2024-12-05T00:51:53,199 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T00:51:53,202 DEBUG [RS:0;fea72ea5c4b6:39187 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;fea72ea5c4b6:39187 2024-12-05T00:51:53,202 INFO [RS:0;fea72ea5c4b6:39187 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T00:51:53,202 DEBUG [RS:1;fea72ea5c4b6:34267 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;fea72ea5c4b6:34267 2024-12-05T00:51:53,202 INFO [RS:0;fea72ea5c4b6:39187 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T00:51:53,202 DEBUG [RS:0;fea72ea5c4b6:39187 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-05T00:51:53,202 INFO [RS:1;fea72ea5c4b6:34267 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T00:51:53,202 INFO [RS:1;fea72ea5c4b6:34267 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T00:51:53,202 DEBUG [RS:1;fea72ea5c4b6:34267 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-05T00:51:53,203 INFO [RS:1;fea72ea5c4b6:34267 {}] regionserver.HRegionServer(2659): reportForDuty to master=fea72ea5c4b6,32923,1733359912257 with port=34267, startcode=1733359912675 2024-12-05T00:51:53,204 DEBUG [RS:1;fea72ea5c4b6:34267 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T00:51:53,204 DEBUG [RS:2;fea72ea5c4b6:35921 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;fea72ea5c4b6:35921 2024-12-05T00:51:53,204 INFO [RS:2;fea72ea5c4b6:35921 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T00:51:53,204 INFO [RS:2;fea72ea5c4b6:35921 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T00:51:53,204 DEBUG [RS:2;fea72ea5c4b6:35921 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-05T00:51:53,205 INFO [RS:0;fea72ea5c4b6:39187 {}] regionserver.HRegionServer(2659): reportForDuty to master=fea72ea5c4b6,32923,1733359912257 with port=39187, startcode=1733359912621 2024-12-05T00:51:53,205 DEBUG [RS:0;fea72ea5c4b6:39187 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T00:51:53,205 INFO [RS:2;fea72ea5c4b6:35921 {}] regionserver.HRegionServer(2659): reportForDuty to master=fea72ea5c4b6,32923,1733359912257 with port=35921, startcode=1733359912725 2024-12-05T00:51:53,205 DEBUG [RS:2;fea72ea5c4b6:35921 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T00:51:53,205 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T00:51:53,206 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:53,207 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:51:53,207 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T00:51:53,209 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; 
tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T00:51:53,209 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:53,210 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:51:53,211 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T00:51:53,213 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T00:51:53,214 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:53,215 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:51:53,215 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T00:51:53,217 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T00:51:53,217 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:53,218 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:51:53,218 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T00:51:53,219 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/hbase/meta/1588230740 2024-12-05T00:51:53,220 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/hbase/meta/1588230740 2024-12-05T00:51:53,222 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T00:51:53,222 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T00:51:53,223 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T00:51:53,224 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T00:51:53,232 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52789, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T00:51:53,233 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32923 {}] master.ServerManager(363): Checking decommissioned status of RegionServer fea72ea5c4b6,35921,1733359912725 2024-12-05T00:51:53,233 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32923 {}] master.ServerManager(517): Registering regionserver=fea72ea5c4b6,35921,1733359912725 2024-12-05T00:51:53,236 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43835, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T00:51:53,237 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32923 {}] master.ServerManager(363): Checking decommissioned status of RegionServer fea72ea5c4b6,39187,1733359912621 2024-12-05T00:51:53,237 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32923 {}] master.ServerManager(517): Registering regionserver=fea72ea5c4b6,39187,1733359912621 2024-12-05T00:51:53,239 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T00:51:53,240 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57929, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T00:51:53,240 DEBUG [RS:2;fea72ea5c4b6:35921 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7 2024-12-05T00:51:53,240 DEBUG [RS:2;fea72ea5c4b6:35921 {}] 
regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34663 2024-12-05T00:51:53,240 DEBUG [RS:0;fea72ea5c4b6:39187 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7 2024-12-05T00:51:53,240 DEBUG [RS:2;fea72ea5c4b6:35921 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T00:51:53,240 DEBUG [RS:0;fea72ea5c4b6:39187 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34663 2024-12-05T00:51:53,240 DEBUG [RS:0;fea72ea5c4b6:39187 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T00:51:53,240 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68602533, jitterRate=0.022257402539253235}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T00:51:53,241 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32923 {}] master.ServerManager(363): Checking decommissioned status of RegionServer fea72ea5c4b6,34267,1733359912675 2024-12-05T00:51:53,241 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32923 {}] master.ServerManager(517): Registering regionserver=fea72ea5c4b6,34267,1733359912675 2024-12-05T00:51:53,242 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733359913187Initializing all the Stores at 1733359913194 (+7 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733359913194Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733359913198 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733359913198Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733359913198Cleaning up temporary data from old regions at 1733359913222 (+24 ms)Region opened successfully at 1733359913242 (+20 ms) 2024-12-05T00:51:53,242 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T00:51:53,242 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region 
hbase:meta,,1.1588230740 2024-12-05T00:51:53,242 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T00:51:53,242 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T00:51:53,242 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T00:51:53,244 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T00:51:53,244 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733359913242Disabling compacts and flushes for region at 1733359913242Disabling writes for close at 1733359913242Writing region close event to WAL at 1733359913243 (+1 ms)Closed at 1733359913243 2024-12-05T00:51:53,245 DEBUG [RS:1;fea72ea5c4b6:34267 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7 2024-12-05T00:51:53,245 DEBUG [RS:1;fea72ea5c4b6:34267 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34663 2024-12-05T00:51:53,245 DEBUG [RS:1;fea72ea5c4b6:34267 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T00:51:53,257 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:51:53,257 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-05T00:51:53,257 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-05T00:51:53,260 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T00:51:53,263 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-05T00:51:53,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T00:51:53,299 DEBUG [RS:2;fea72ea5c4b6:35921 {}] zookeeper.ZKUtil(111): regionserver:35921-0x101a2f940cc0003, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/fea72ea5c4b6,35921,1733359912725 2024-12-05T00:51:53,299 DEBUG [RS:0;fea72ea5c4b6:39187 {}] zookeeper.ZKUtil(111): regionserver:39187-0x101a2f940cc0001, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/fea72ea5c4b6,39187,1733359912621 2024-12-05T00:51:53,300 WARN [RS:0;fea72ea5c4b6:39187 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-05T00:51:53,300 WARN [RS:2;fea72ea5c4b6:35921 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T00:51:53,300 INFO [RS:0;fea72ea5c4b6:39187 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T00:51:53,300 INFO [RS:2;fea72ea5c4b6:35921 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T00:51:53,300 DEBUG [RS:2;fea72ea5c4b6:35921 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/WALs/fea72ea5c4b6,35921,1733359912725 2024-12-05T00:51:53,300 DEBUG [RS:0;fea72ea5c4b6:39187 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/WALs/fea72ea5c4b6,39187,1733359912621 2024-12-05T00:51:53,301 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [fea72ea5c4b6,39187,1733359912621] 2024-12-05T00:51:53,301 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [fea72ea5c4b6,35921,1733359912725] 2024-12-05T00:51:53,301 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [fea72ea5c4b6,34267,1733359912675] 2024-12-05T00:51:53,302 DEBUG [RS:1;fea72ea5c4b6:34267 {}] zookeeper.ZKUtil(111): regionserver:34267-0x101a2f940cc0002, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/fea72ea5c4b6,34267,1733359912675 2024-12-05T00:51:53,302 WARN [RS:1;fea72ea5c4b6:34267 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T00:51:53,302 INFO [RS:1;fea72ea5c4b6:34267 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T00:51:53,302 DEBUG [RS:1;fea72ea5c4b6:34267 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/WALs/fea72ea5c4b6,34267,1733359912675 2024-12-05T00:51:53,306 INFO [RS:0;fea72ea5c4b6:39187 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T00:51:53,310 INFO [RS:2;fea72ea5c4b6:35921 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T00:51:53,316 INFO [RS:0;fea72ea5c4b6:39187 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T00:51:53,318 INFO [RS:1;fea72ea5c4b6:34267 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T00:51:53,323 INFO [RS:0;fea72ea5c4b6:39187 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T00:51:53,323 INFO [RS:0;fea72ea5c4b6:39187 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-05T00:51:53,324 INFO [RS:0;fea72ea5c4b6:39187 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T00:51:53,325 INFO [RS:0;fea72ea5c4b6:39187 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T00:51:53,326 INFO [RS:0;fea72ea5c4b6:39187 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:53,326 DEBUG [RS:0;fea72ea5c4b6:39187 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,326 DEBUG [RS:0;fea72ea5c4b6:39187 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,326 DEBUG [RS:0;fea72ea5c4b6:39187 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,326 DEBUG [RS:0;fea72ea5c4b6:39187 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,326 DEBUG [RS:0;fea72ea5c4b6:39187 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,326 DEBUG [RS:0;fea72ea5c4b6:39187 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/fea72ea5c4b6:0, corePoolSize=2, maxPoolSize=2 2024-12-05T00:51:53,326 DEBUG [RS:0;fea72ea5c4b6:39187 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,327 DEBUG [RS:0;fea72ea5c4b6:39187 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,327 DEBUG [RS:0;fea72ea5c4b6:39187 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,327 DEBUG [RS:0;fea72ea5c4b6:39187 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,327 DEBUG [RS:0;fea72ea5c4b6:39187 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,327 DEBUG [RS:0;fea72ea5c4b6:39187 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,327 DEBUG [RS:0;fea72ea5c4b6:39187 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/fea72ea5c4b6:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:51:53,327 DEBUG [RS:0;fea72ea5c4b6:39187 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:51:53,328 INFO [RS:1;fea72ea5c4b6:34267 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T00:51:53,329 INFO [RS:2;fea72ea5c4b6:35921 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, 
globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T00:51:53,329 INFO [RS:2;fea72ea5c4b6:35921 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T00:51:53,330 INFO [RS:2;fea72ea5c4b6:35921 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:53,330 INFO [RS:2;fea72ea5c4b6:35921 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T00:51:53,331 INFO [RS:2;fea72ea5c4b6:35921 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T00:51:53,331 INFO [RS:2;fea72ea5c4b6:35921 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:53,331 DEBUG [RS:2;fea72ea5c4b6:35921 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,332 DEBUG [RS:2;fea72ea5c4b6:35921 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,332 DEBUG [RS:2;fea72ea5c4b6:35921 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,332 DEBUG [RS:2;fea72ea5c4b6:35921 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,332 DEBUG [RS:2;fea72ea5c4b6:35921 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,332 DEBUG [RS:2;fea72ea5c4b6:35921 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/fea72ea5c4b6:0, corePoolSize=2, maxPoolSize=2 2024-12-05T00:51:53,332 DEBUG [RS:2;fea72ea5c4b6:35921 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,332 DEBUG [RS:2;fea72ea5c4b6:35921 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,332 DEBUG [RS:2;fea72ea5c4b6:35921 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,332 DEBUG [RS:2;fea72ea5c4b6:35921 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,332 DEBUG [RS:2;fea72ea5c4b6:35921 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,332 DEBUG [RS:2;fea72ea5c4b6:35921 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,332 DEBUG [RS:2;fea72ea5c4b6:35921 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/fea72ea5c4b6:0, corePoolSize=3, maxPoolSize=3 
2024-12-05T00:51:53,332 DEBUG [RS:2;fea72ea5c4b6:35921 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:51:53,335 INFO [RS:1;fea72ea5c4b6:34267 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T00:51:53,335 INFO [RS:1;fea72ea5c4b6:34267 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:53,344 INFO [RS:1;fea72ea5c4b6:34267 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T00:51:53,345 INFO [RS:2;fea72ea5c4b6:35921 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:53,345 INFO [RS:2;fea72ea5c4b6:35921 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:53,345 INFO [RS:2;fea72ea5c4b6:35921 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:53,345 INFO [RS:2;fea72ea5c4b6:35921 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:53,345 INFO [RS:2;fea72ea5c4b6:35921 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:53,345 INFO [RS:2;fea72ea5c4b6:35921 {}] hbase.ChoreService(168): Chore ScheduledChore name=fea72ea5c4b6,35921,1733359912725-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T00:51:53,346 INFO [RS:1;fea72ea5c4b6:34267 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T00:51:53,346 INFO [RS:1;fea72ea5c4b6:34267 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-05T00:51:53,346 DEBUG [RS:1;fea72ea5c4b6:34267 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,346 DEBUG [RS:1;fea72ea5c4b6:34267 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,347 DEBUG [RS:1;fea72ea5c4b6:34267 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,347 DEBUG [RS:1;fea72ea5c4b6:34267 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,347 DEBUG [RS:1;fea72ea5c4b6:34267 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,347 DEBUG [RS:1;fea72ea5c4b6:34267 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/fea72ea5c4b6:0, corePoolSize=2, maxPoolSize=2 2024-12-05T00:51:53,347 DEBUG [RS:1;fea72ea5c4b6:34267 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,347 DEBUG [RS:1;fea72ea5c4b6:34267 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,347 DEBUG [RS:1;fea72ea5c4b6:34267 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,347 DEBUG [RS:1;fea72ea5c4b6:34267 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,347 DEBUG [RS:1;fea72ea5c4b6:34267 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,347 DEBUG [RS:1;fea72ea5c4b6:34267 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/fea72ea5c4b6:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:51:53,347 DEBUG [RS:1;fea72ea5c4b6:34267 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/fea72ea5c4b6:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:51:53,348 DEBUG [RS:1;fea72ea5c4b6:34267 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:51:53,352 INFO [RS:0;fea72ea5c4b6:39187 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:53,352 INFO [RS:0;fea72ea5c4b6:39187 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:53,352 INFO [RS:0;fea72ea5c4b6:39187 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:53,352 INFO [RS:0;fea72ea5c4b6:39187 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-05T00:51:53,352 INFO [RS:0;fea72ea5c4b6:39187 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:53,353 INFO [RS:0;fea72ea5c4b6:39187 {}] hbase.ChoreService(168): Chore ScheduledChore name=fea72ea5c4b6,39187,1733359912621-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T00:51:53,361 INFO [RS:1;fea72ea5c4b6:34267 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:53,361 INFO [RS:1;fea72ea5c4b6:34267 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:53,361 INFO [RS:1;fea72ea5c4b6:34267 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:53,362 INFO [RS:1;fea72ea5c4b6:34267 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:53,362 INFO [RS:1;fea72ea5c4b6:34267 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:53,362 INFO [RS:1;fea72ea5c4b6:34267 {}] hbase.ChoreService(168): Chore ScheduledChore name=fea72ea5c4b6,34267,1733359912675-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T00:51:53,363 INFO [RS:2;fea72ea5c4b6:35921 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T00:51:53,363 INFO [RS:2;fea72ea5c4b6:35921 {}] hbase.ChoreService(168): Chore ScheduledChore name=fea72ea5c4b6,35921,1733359912725-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:53,363 INFO [RS:2;fea72ea5c4b6:35921 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:53,363 INFO [RS:2;fea72ea5c4b6:35921 {}] regionserver.Replication(171): fea72ea5c4b6,35921,1733359912725 started 2024-12-05T00:51:53,381 INFO [RS:0;fea72ea5c4b6:39187 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T00:51:53,381 INFO [RS:2;fea72ea5c4b6:35921 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:53,381 INFO [RS:0;fea72ea5c4b6:39187 {}] hbase.ChoreService(168): Chore ScheduledChore name=fea72ea5c4b6,39187,1733359912621-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
2024-12-05T00:51:53,381 INFO [RS:2;fea72ea5c4b6:35921 {}] regionserver.HRegionServer(1482): Serving as fea72ea5c4b6,35921,1733359912725, RpcServer on fea72ea5c4b6/172.17.0.2:35921, sessionid=0x101a2f940cc0003 2024-12-05T00:51:53,382 DEBUG [RS:2;fea72ea5c4b6:35921 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T00:51:53,382 DEBUG [RS:2;fea72ea5c4b6:35921 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager fea72ea5c4b6,35921,1733359912725 2024-12-05T00:51:53,382 DEBUG [RS:2;fea72ea5c4b6:35921 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fea72ea5c4b6,35921,1733359912725' 2024-12-05T00:51:53,382 DEBUG [RS:2;fea72ea5c4b6:35921 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T00:51:53,382 INFO [RS:0;fea72ea5c4b6:39187 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:53,382 INFO [RS:0;fea72ea5c4b6:39187 {}] regionserver.Replication(171): fea72ea5c4b6,39187,1733359912621 started 2024-12-05T00:51:53,384 DEBUG [RS:2;fea72ea5c4b6:35921 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T00:51:53,385 INFO [RS:1;fea72ea5c4b6:34267 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T00:51:53,386 INFO [RS:1;fea72ea5c4b6:34267 {}] hbase.ChoreService(168): Chore ScheduledChore name=fea72ea5c4b6,34267,1733359912675-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:53,386 INFO [RS:1;fea72ea5c4b6:34267 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:53,386 INFO [RS:1;fea72ea5c4b6:34267 {}] regionserver.Replication(171): fea72ea5c4b6,34267,1733359912675 started 2024-12-05T00:51:53,388 DEBUG [RS:2;fea72ea5c4b6:35921 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T00:51:53,389 DEBUG [RS:2;fea72ea5c4b6:35921 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T00:51:53,389 DEBUG [RS:2;fea72ea5c4b6:35921 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager fea72ea5c4b6,35921,1733359912725 2024-12-05T00:51:53,389 DEBUG [RS:2;fea72ea5c4b6:35921 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fea72ea5c4b6,35921,1733359912725' 2024-12-05T00:51:53,389 DEBUG [RS:2;fea72ea5c4b6:35921 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T00:51:53,390 DEBUG [RS:2;fea72ea5c4b6:35921 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T00:51:53,390 DEBUG [RS:2;fea72ea5c4b6:35921 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T00:51:53,390 INFO [RS:2;fea72ea5c4b6:35921 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T00:51:53,390 INFO [RS:2;fea72ea5c4b6:35921 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T00:51:53,397 INFO [RS:0;fea72ea5c4b6:39187 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-05T00:51:53,397 INFO [RS:0;fea72ea5c4b6:39187 {}] regionserver.HRegionServer(1482): Serving as fea72ea5c4b6,39187,1733359912621, RpcServer on fea72ea5c4b6/172.17.0.2:39187, sessionid=0x101a2f940cc0001 2024-12-05T00:51:53,397 DEBUG [RS:0;fea72ea5c4b6:39187 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T00:51:53,397 DEBUG [RS:0;fea72ea5c4b6:39187 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager fea72ea5c4b6,39187,1733359912621 2024-12-05T00:51:53,397 DEBUG [RS:0;fea72ea5c4b6:39187 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fea72ea5c4b6,39187,1733359912621' 2024-12-05T00:51:53,397 DEBUG [RS:0;fea72ea5c4b6:39187 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T00:51:53,398 DEBUG [RS:0;fea72ea5c4b6:39187 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T00:51:53,398 DEBUG [RS:0;fea72ea5c4b6:39187 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T00:51:53,398 DEBUG [RS:0;fea72ea5c4b6:39187 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T00:51:53,398 DEBUG [RS:0;fea72ea5c4b6:39187 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager fea72ea5c4b6,39187,1733359912621 2024-12-05T00:51:53,398 DEBUG [RS:0;fea72ea5c4b6:39187 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fea72ea5c4b6,39187,1733359912621' 2024-12-05T00:51:53,398 DEBUG [RS:0;fea72ea5c4b6:39187 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T00:51:53,399 DEBUG [RS:0;fea72ea5c4b6:39187 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T00:51:53,399 DEBUG [RS:0;fea72ea5c4b6:39187 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T00:51:53,399 INFO [RS:0;fea72ea5c4b6:39187 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T00:51:53,399 INFO [RS:0;fea72ea5c4b6:39187 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T00:51:53,406 INFO [RS:1;fea72ea5c4b6:34267 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-05T00:51:53,406 INFO [RS:1;fea72ea5c4b6:34267 {}] regionserver.HRegionServer(1482): Serving as fea72ea5c4b6,34267,1733359912675, RpcServer on fea72ea5c4b6/172.17.0.2:34267, sessionid=0x101a2f940cc0002 2024-12-05T00:51:53,406 DEBUG [RS:1;fea72ea5c4b6:34267 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T00:51:53,406 DEBUG [RS:1;fea72ea5c4b6:34267 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager fea72ea5c4b6,34267,1733359912675 2024-12-05T00:51:53,406 DEBUG [RS:1;fea72ea5c4b6:34267 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fea72ea5c4b6,34267,1733359912675' 2024-12-05T00:51:53,406 DEBUG [RS:1;fea72ea5c4b6:34267 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T00:51:53,407 DEBUG [RS:1;fea72ea5c4b6:34267 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T00:51:53,407 DEBUG [RS:1;fea72ea5c4b6:34267 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T00:51:53,407 DEBUG [RS:1;fea72ea5c4b6:34267 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T00:51:53,407 DEBUG [RS:1;fea72ea5c4b6:34267 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager fea72ea5c4b6,34267,1733359912675 2024-12-05T00:51:53,408 DEBUG [RS:1;fea72ea5c4b6:34267 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fea72ea5c4b6,34267,1733359912675' 2024-12-05T00:51:53,408 DEBUG [RS:1;fea72ea5c4b6:34267 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T00:51:53,408 DEBUG [RS:1;fea72ea5c4b6:34267 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T00:51:53,409 DEBUG [RS:1;fea72ea5c4b6:34267 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T00:51:53,409 INFO [RS:1;fea72ea5c4b6:34267 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T00:51:53,409 INFO [RS:1;fea72ea5c4b6:34267 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T00:51:53,413 WARN [fea72ea5c4b6:32923 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-12-05T00:51:53,494 INFO [RS:2;fea72ea5c4b6:35921 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fea72ea5c4b6%2C35921%2C1733359912725, suffix=, logDir=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/WALs/fea72ea5c4b6,35921,1733359912725, archiveDir=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/oldWALs, maxLogs=32 2024-12-05T00:51:53,497 INFO [RS:2;fea72ea5c4b6:35921 {}] monitor.StreamSlowMonitor(122): New stream slow monitor fea72ea5c4b6%2C35921%2C1733359912725.1733359913496 2024-12-05T00:51:53,502 INFO [RS:0;fea72ea5c4b6:39187 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fea72ea5c4b6%2C39187%2C1733359912621, suffix=, logDir=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/WALs/fea72ea5c4b6,39187,1733359912621, archiveDir=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/oldWALs, maxLogs=32 2024-12-05T00:51:53,504 INFO [RS:0;fea72ea5c4b6:39187 {}] monitor.StreamSlowMonitor(122): New stream slow monitor fea72ea5c4b6%2C39187%2C1733359912621.1733359913504 2024-12-05T00:51:53,511 INFO [RS:2;fea72ea5c4b6:35921 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/WALs/fea72ea5c4b6,35921,1733359912725/fea72ea5c4b6%2C35921%2C1733359912725.1733359913496 2024-12-05T00:51:53,511 INFO [RS:1;fea72ea5c4b6:34267 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fea72ea5c4b6%2C34267%2C1733359912675, suffix=, logDir=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/WALs/fea72ea5c4b6,34267,1733359912675, archiveDir=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/oldWALs, maxLogs=32 2024-12-05T00:51:53,512 INFO [RS:1;fea72ea5c4b6:34267 {}] monitor.StreamSlowMonitor(122): New stream slow monitor fea72ea5c4b6%2C34267%2C1733359912675.1733359913512 2024-12-05T00:51:53,522 DEBUG [RS:2;fea72ea5c4b6:35921 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35069:35069),(127.0.0.1/127.0.0.1:39709:39709),(127.0.0.1/127.0.0.1:36055:36055)] 2024-12-05T00:51:53,524 INFO [RS:0;fea72ea5c4b6:39187 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/WALs/fea72ea5c4b6,39187,1733359912621/fea72ea5c4b6%2C39187%2C1733359912621.1733359913504 2024-12-05T00:51:53,527 DEBUG [RS:0;fea72ea5c4b6:39187 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35069:35069),(127.0.0.1/127.0.0.1:36055:36055),(127.0.0.1/127.0.0.1:39709:39709)] 2024-12-05T00:51:53,530 INFO [RS:1;fea72ea5c4b6:34267 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/WALs/fea72ea5c4b6,34267,1733359912675/fea72ea5c4b6%2C34267%2C1733359912675.1733359913512 2024-12-05T00:51:53,531 DEBUG [RS:1;fea72ea5c4b6:34267 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35069:35069),(127.0.0.1/127.0.0.1:39709:39709),(127.0.0.1/127.0.0.1:36055:36055)] 2024-12-05T00:51:53,664 DEBUG [fea72ea5c4b6:32923 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-05T00:51:53,664 DEBUG [fea72ea5c4b6:32923 {}] balancer.BalancerClusterState(204): Hosts are {fea72ea5c4b6=0} racks are {/default-rack=0} 2024-12-05T00:51:53,666 DEBUG [fea72ea5c4b6:32923 {}] 
balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T00:51:53,666 DEBUG [fea72ea5c4b6:32923 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T00:51:53,666 DEBUG [fea72ea5c4b6:32923 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T00:51:53,666 DEBUG [fea72ea5c4b6:32923 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T00:51:53,666 DEBUG [fea72ea5c4b6:32923 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T00:51:53,666 DEBUG [fea72ea5c4b6:32923 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T00:51:53,666 INFO [fea72ea5c4b6:32923 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T00:51:53,666 INFO [fea72ea5c4b6:32923 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T00:51:53,666 INFO [fea72ea5c4b6:32923 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T00:51:53,666 DEBUG [fea72ea5c4b6:32923 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T00:51:53,667 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=fea72ea5c4b6,34267,1733359912675 2024-12-05T00:51:53,669 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as fea72ea5c4b6,34267,1733359912675, state=OPENING 2024-12-05T00:51:53,739 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-05T00:51:53,801 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34267-0x101a2f940cc0002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:53,801 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x101a2f940cc0003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:53,801 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39187-0x101a2f940cc0001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:53,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:53,802 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:51:53,802 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T00:51:53,802 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=fea72ea5c4b6,34267,1733359912675}] 2024-12-05T00:51:53,803 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:51:53,804 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:51:53,804 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:51:53,958 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T00:51:53,959 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52415, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T00:51:53,965 INFO [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-05T00:51:53,965 INFO [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T00:51:53,968 INFO [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fea72ea5c4b6%2C34267%2C1733359912675.meta, suffix=.meta, logDir=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/WALs/fea72ea5c4b6,34267,1733359912675, archiveDir=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/oldWALs, maxLogs=32 2024-12-05T00:51:53,969 INFO [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor fea72ea5c4b6%2C34267%2C1733359912675.meta.1733359913969.meta 2024-12-05T00:51:53,980 INFO [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/WALs/fea72ea5c4b6,34267,1733359912675/fea72ea5c4b6%2C34267%2C1733359912675.meta.1733359913969.meta 2024-12-05T00:51:53,985 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39709:39709),(127.0.0.1/127.0.0.1:35069:35069),(127.0.0.1/127.0.0.1:36055:36055)] 2024-12-05T00:51:53,988 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-05T00:51:53,988 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-05T00:51:53,988 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-05T00:51:53,989 INFO [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-05T00:51:53,989 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-05T00:51:53,989 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:51:53,989 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-05T00:51:53,989 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-05T00:51:53,991 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T00:51:53,992 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T00:51:53,993 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:53,993 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:51:53,993 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T00:51:53,995 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T00:51:53,995 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:53,995 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:51:53,996 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T00:51:53,997 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T00:51:53,997 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:53,998 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:51:53,998 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T00:51:53,999 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T00:51:53,999 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:54,000 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-05T00:51:54,000 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T00:51:54,001 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/hbase/meta/1588230740 2024-12-05T00:51:54,003 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/hbase/meta/1588230740 2024-12-05T00:51:54,004 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T00:51:54,004 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T00:51:54,005 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T00:51:54,006 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T00:51:54,007 INFO [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72120669, jitterRate=0.07468171417713165}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T00:51:54,007 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-05T00:51:54,008 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733359913989Writing region info on filesystem at 1733359913989Initializing all the Stores at 1733359913990 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733359913990Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733359913991 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733359913991Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733359913991Cleaning up temporary data from old regions at 1733359914004 (+13 ms)Running coprocessor post-open hooks at 1733359914007 (+3 ms)Region opened successfully at 1733359914008 (+1 ms) 2024-12-05T00:51:54,010 INFO [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733359913957 2024-12-05T00:51:54,013 DEBUG [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-05T00:51:54,013 INFO [RS_OPEN_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-05T00:51:54,014 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=fea72ea5c4b6,34267,1733359912675 2024-12-05T00:51:54,016 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as fea72ea5c4b6,34267,1733359912675, state=OPEN 2024-12-05T00:51:54,096 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34267-0x101a2f940cc0002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T00:51:54,096 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x101a2f940cc0003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T00:51:54,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T00:51:54,096 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=fea72ea5c4b6,34267,1733359912675 2024-12-05T00:51:54,096 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:51:54,097 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:51:54,097 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:51:54,096 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39187-0x101a2f940cc0001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T00:51:54,097 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:51:54,101 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-05T00:51:54,102 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=fea72ea5c4b6,34267,1733359912675 in 295 msec 2024-12-05T00:51:54,106 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-05T00:51:54,106 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 845 msec 2024-12-05T00:51:54,107 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:51:54,107 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-05T00:51:54,109 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T00:51:54,109 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fea72ea5c4b6,34267,1733359912675, seqNum=-1] 2024-12-05T00:51:54,109 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T00:51:54,111 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56559, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T00:51:54,120 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 971 msec 2024-12-05T00:51:54,120 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733359914120, completionTime=-1 2024-12-05T00:51:54,120 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-05T00:51:54,120 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-05T00:51:54,122 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-05T00:51:54,122 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733359974122 2024-12-05T00:51:54,122 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733360034122 2024-12-05T00:51:54,122 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-05T00:51:54,123 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-05T00:51:54,123 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fea72ea5c4b6,32923,1733359912257-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:54,123 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fea72ea5c4b6,32923,1733359912257-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:54,123 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fea72ea5c4b6,32923,1733359912257-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:54,123 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-fea72ea5c4b6:32923, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:54,124 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:54,124 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:54,126 DEBUG [master/fea72ea5c4b6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-05T00:51:54,129 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.333sec 2024-12-05T00:51:54,129 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-05T00:51:54,129 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-05T00:51:54,129 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-05T00:51:54,129 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-05T00:51:54,129 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-05T00:51:54,129 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fea72ea5c4b6,32923,1733359912257-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T00:51:54,129 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fea72ea5c4b6,32923,1733359912257-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-05T00:51:54,132 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-05T00:51:54,132 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-05T00:51:54,132 INFO [master/fea72ea5c4b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fea72ea5c4b6,32923,1733359912257-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:51:54,158 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d5c3596, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:51:54,158 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request fea72ea5c4b6,32923,-1 for getting cluster id 2024-12-05T00:51:54,158 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T00:51:54,159 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ab3adf63-3ffe-4f88-a021-0995f9acb5af' 2024-12-05T00:51:54,160 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T00:51:54,160 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ab3adf63-3ffe-4f88-a021-0995f9acb5af" 2024-12-05T00:51:54,161 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@698f241a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:51:54,161 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fea72ea5c4b6,32923,-1] 2024-12-05T00:51:54,161 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T00:51:54,161 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:51:54,162 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35334, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T00:51:54,163 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@208ff8b1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:51:54,164 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T00:51:54,165 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fea72ea5c4b6,34267,1733359912675, seqNum=-1] 2024-12-05T00:51:54,165 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T00:51:54,167 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47454, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T00:51:54,169 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=fea72ea5c4b6,32923,1733359912257 2024-12-05T00:51:54,170 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-05T00:51:54,171 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncConnectionImpl(321): The fetched master address is fea72ea5c4b6,32923,1733359912257 2024-12-05T00:51:54,171 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@648113f0 2024-12-05T00:51:54,172 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-05T00:51:54,173 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35346, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-05T00:51:54,174 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32923 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T00:51:54,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32923 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-05T00:51:54,178 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T00:51:54,178 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:54,178 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32923 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-05T00:51:54,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32923 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T00:51:54,180 INFO [PEWorker-3 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T00:51:54,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46297 is added to blk_1073741837_1013 (size=392) 2024-12-05T00:51:54,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42605 is added to blk_1073741837_1013 (size=392) 2024-12-05T00:51:54,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45923 is added to blk_1073741837_1013 (size=392) 2024-12-05T00:51:54,193 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4e7968e43bb10158d4a67a8e4798895b, NAME => 'TestHBaseWalOnEC,,1733359914174.4e7968e43bb10158d4a67a8e4798895b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7 2024-12-05T00:51:54,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46297 is added to blk_1073741838_1014 (size=51) 2024-12-05T00:51:54,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45923 is added to blk_1073741838_1014 (size=51) 2024-12-05T00:51:54,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42605 is added to blk_1073741838_1014 (size=51) 2024-12-05T00:51:54,202 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733359914174.4e7968e43bb10158d4a67a8e4798895b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:51:54,202 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 4e7968e43bb10158d4a67a8e4798895b, disabling compactions & flushes 2024-12-05T00:51:54,202 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733359914174.4e7968e43bb10158d4a67a8e4798895b. 2024-12-05T00:51:54,203 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733359914174.4e7968e43bb10158d4a67a8e4798895b. 2024-12-05T00:51:54,203 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733359914174.4e7968e43bb10158d4a67a8e4798895b. after waiting 0 ms 2024-12-05T00:51:54,203 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733359914174.4e7968e43bb10158d4a67a8e4798895b. 2024-12-05T00:51:54,203 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733359914174.4e7968e43bb10158d4a67a8e4798895b. 
2024-12-05T00:51:54,203 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4e7968e43bb10158d4a67a8e4798895b: Waiting for close lock at 1733359914202Disabling compacts and flushes for region at 1733359914202Disabling writes for close at 1733359914203 (+1 ms)Writing region close event to WAL at 1733359914203Closed at 1733359914203 2024-12-05T00:51:54,205 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T00:51:54,205 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733359914174.4e7968e43bb10158d4a67a8e4798895b.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733359914205"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733359914205"}]},"ts":"1733359914205"} 2024-12-05T00:51:54,209 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-05T00:51:54,210 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T00:51:54,211 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733359914211"}]},"ts":"1733359914211"} 2024-12-05T00:51:54,214 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-05T00:51:54,214 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {fea72ea5c4b6=0} racks are {/default-rack=0} 2024-12-05T00:51:54,215 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T00:51:54,216 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T00:51:54,216 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T00:51:54,216 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T00:51:54,216 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T00:51:54,216 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T00:51:54,216 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T00:51:54,216 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T00:51:54,216 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T00:51:54,216 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T00:51:54,216 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4e7968e43bb10158d4a67a8e4798895b, ASSIGN}] 2024-12-05T00:51:54,218 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4e7968e43bb10158d4a67a8e4798895b, ASSIGN 2024-12-05T00:51:54,220 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4e7968e43bb10158d4a67a8e4798895b, ASSIGN; state=OFFLINE, location=fea72ea5c4b6,34267,1733359912675; forceNewPlan=false, retain=false 2024-12-05T00:51:54,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32923 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T00:51:54,370 INFO [fea72ea5c4b6:32923 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-05T00:51:54,371 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4e7968e43bb10158d4a67a8e4798895b, regionState=OPENING, regionLocation=fea72ea5c4b6,34267,1733359912675 2024-12-05T00:51:54,375 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4e7968e43bb10158d4a67a8e4798895b, ASSIGN because future has completed 2024-12-05T00:51:54,375 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4e7968e43bb10158d4a67a8e4798895b, server=fea72ea5c4b6,34267,1733359912675}] 2024-12-05T00:51:54,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32923 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T00:51:54,533 INFO [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733359914174.4e7968e43bb10158d4a67a8e4798895b. 
2024-12-05T00:51:54,534 DEBUG [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 4e7968e43bb10158d4a67a8e4798895b, NAME => 'TestHBaseWalOnEC,,1733359914174.4e7968e43bb10158d4a67a8e4798895b.', STARTKEY => '', ENDKEY => ''} 2024-12-05T00:51:54,534 DEBUG [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 4e7968e43bb10158d4a67a8e4798895b 2024-12-05T00:51:54,534 DEBUG [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733359914174.4e7968e43bb10158d4a67a8e4798895b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:51:54,534 DEBUG [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 4e7968e43bb10158d4a67a8e4798895b 2024-12-05T00:51:54,534 DEBUG [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 4e7968e43bb10158d4a67a8e4798895b 2024-12-05T00:51:54,536 INFO [StoreOpener-4e7968e43bb10158d4a67a8e4798895b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4e7968e43bb10158d4a67a8e4798895b 2024-12-05T00:51:54,538 INFO [StoreOpener-4e7968e43bb10158d4a67a8e4798895b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4e7968e43bb10158d4a67a8e4798895b columnFamilyName cf 2024-12-05T00:51:54,538 DEBUG [StoreOpener-4e7968e43bb10158d4a67a8e4798895b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:51:54,539 INFO [StoreOpener-4e7968e43bb10158d4a67a8e4798895b-1 {}] regionserver.HStore(327): Store=4e7968e43bb10158d4a67a8e4798895b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:51:54,539 DEBUG [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 4e7968e43bb10158d4a67a8e4798895b 2024-12-05T00:51:54,539 DEBUG [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/default/TestHBaseWalOnEC/4e7968e43bb10158d4a67a8e4798895b 2024-12-05T00:51:54,540 DEBUG 
[RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/default/TestHBaseWalOnEC/4e7968e43bb10158d4a67a8e4798895b 2024-12-05T00:51:54,540 DEBUG [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 4e7968e43bb10158d4a67a8e4798895b 2024-12-05T00:51:54,540 DEBUG [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 4e7968e43bb10158d4a67a8e4798895b 2024-12-05T00:51:54,542 DEBUG [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 4e7968e43bb10158d4a67a8e4798895b 2024-12-05T00:51:54,545 DEBUG [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/default/TestHBaseWalOnEC/4e7968e43bb10158d4a67a8e4798895b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T00:51:54,545 INFO [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 4e7968e43bb10158d4a67a8e4798895b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69695088, jitterRate=0.03853774070739746}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T00:51:54,545 DEBUG [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4e7968e43bb10158d4a67a8e4798895b 2024-12-05T00:51:54,546 DEBUG [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 4e7968e43bb10158d4a67a8e4798895b: Running coprocessor pre-open hook at 1733359914534Writing region info on filesystem at 1733359914534Initializing all the Stores at 1733359914536 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733359914536Cleaning up temporary data from old regions at 1733359914540 (+4 ms)Running coprocessor post-open hooks at 1733359914545 (+5 ms)Region opened successfully at 1733359914546 (+1 ms) 2024-12-05T00:51:54,548 INFO [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733359914174.4e7968e43bb10158d4a67a8e4798895b., pid=6, masterSystemTime=1733359914529 2024-12-05T00:51:54,551 DEBUG [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733359914174.4e7968e43bb10158d4a67a8e4798895b. 2024-12-05T00:51:54,551 INFO [RS_OPEN_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733359914174.4e7968e43bb10158d4a67a8e4798895b. 
2024-12-05T00:51:54,552 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4e7968e43bb10158d4a67a8e4798895b, regionState=OPEN, openSeqNum=2, regionLocation=fea72ea5c4b6,34267,1733359912675 2024-12-05T00:51:54,558 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32923 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=fea72ea5c4b6,34267,1733359912675, table=TestHBaseWalOnEC, region=4e7968e43bb10158d4a67a8e4798895b. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-05T00:51:54,558 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4e7968e43bb10158d4a67a8e4798895b, server=fea72ea5c4b6,34267,1733359912675 because future has completed 2024-12-05T00:51:54,564 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-05T00:51:54,564 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 4e7968e43bb10158d4a67a8e4798895b, server=fea72ea5c4b6,34267,1733359912675 in 186 msec 2024-12-05T00:51:54,567 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-05T00:51:54,568 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4e7968e43bb10158d4a67a8e4798895b, ASSIGN in 348 msec 2024-12-05T00:51:54,569 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T00:51:54,570 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733359914569"}]},"ts":"1733359914569"} 2024-12-05T00:51:54,573 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-05T00:51:54,575 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T00:51:54,578 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 401 msec 2024-12-05T00:51:54,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32923 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T00:51:54,812 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-05T00:51:54,812 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-05T00:51:54,812 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T00:51:54,815 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 
2024-12-05T00:51:54,815 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T00:51:54,815 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-05T00:51:54,819 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733359914174.4e7968e43bb10158d4a67a8e4798895b., hostname=fea72ea5c4b6,34267,1733359912675, seqNum=2] 2024-12-05T00:51:54,823 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32923 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-05T00:51:54,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32923 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-05T00:51:54,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32923 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T00:51:54,826 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-05T00:51:54,828 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-05T00:51:54,828 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-05T00:51:54,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32923 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T00:51:54,983 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34267 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-05T00:51:54,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733359914174.4e7968e43bb10158d4a67a8e4798895b. 
2024-12-05T00:51:54,984 INFO [RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 4e7968e43bb10158d4a67a8e4798895b 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-05T00:51:55,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/default/TestHBaseWalOnEC/4e7968e43bb10158d4a67a8e4798895b/.tmp/cf/e2a9831f555a4527b50bf83042f9c69e is 36, key is row/cf:cq/1733359914820/Put/seqid=0 2024-12-05T00:51:55,002 WARN [IPC Server handler 2 on default port 34663 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-05T00:51:55,002 WARN [IPC Server handler 2 on default port 34663 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-05T00:51:55,002 WARN [IPC Server handler 2 on default port 34663 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-05T00:51:55,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46297 is added to blk_1073741839_1015 (size=4787) 2024-12-05T00:51:55,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42605 is added to blk_1073741839_1015 (size=4787) 2024-12-05T00:51:55,013 INFO [RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/default/TestHBaseWalOnEC/4e7968e43bb10158d4a67a8e4798895b/.tmp/cf/e2a9831f555a4527b50bf83042f9c69e 2024-12-05T00:51:55,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/default/TestHBaseWalOnEC/4e7968e43bb10158d4a67a8e4798895b/.tmp/cf/e2a9831f555a4527b50bf83042f9c69e as hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/default/TestHBaseWalOnEC/4e7968e43bb10158d4a67a8e4798895b/cf/e2a9831f555a4527b50bf83042f9c69e 2024-12-05T00:51:55,032 INFO [RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/default/TestHBaseWalOnEC/4e7968e43bb10158d4a67a8e4798895b/cf/e2a9831f555a4527b50bf83042f9c69e, entries=1, sequenceid=5, filesize=4.7 K 2024-12-05T00:51:55,034 INFO [RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 4e7968e43bb10158d4a67a8e4798895b in 50ms, sequenceid=5, compaction requested=false 2024-12-05T00:51:55,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 4e7968e43bb10158d4a67a8e4798895b: 2024-12-05T00:51:55,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733359914174.4e7968e43bb10158d4a67a8e4798895b. 2024-12-05T00:51:55,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fea72ea5c4b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-05T00:51:55,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32923 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-05T00:51:55,042 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-05T00:51:55,043 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 210 msec 2024-12-05T00:51:55,047 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 221 msec 2024-12-05T00:51:55,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32923 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T00:51:55,142 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-05T00:51:55,145 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-05T00:51:55,146 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-05T00:51:55,146 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:51:55,146 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-12-05T00:51:55,146 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:51:55,146 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T00:51:55,146 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-05T00:51:55,146 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=863623230, stopped=false 2024-12-05T00:51:55,146 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=fea72ea5c4b6,32923,1733359912257 2024-12-05T00:51:55,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T00:51:55,202 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34267-0x101a2f940cc0002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T00:51:55,202 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x101a2f940cc0003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T00:51:55,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:55,202 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x101a2f940cc0003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:55,202 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34267-0x101a2f940cc0002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:55,202 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T00:51:55,202 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39187-0x101a2f940cc0001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T00:51:55,202 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39187-0x101a2f940cc0001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:55,202 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-05T00:51:55,203 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:51:55,203 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:51:55,203 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'fea72ea5c4b6,39187,1733359912621' ***** 2024-12-05T00:51:55,203 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T00:51:55,203 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35921-0x101a2f940cc0003, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:51:55,203 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'fea72ea5c4b6,34267,1733359912675' ***** 2024-12-05T00:51:55,203 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T00:51:55,203 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'fea72ea5c4b6,35921,1733359912725' ***** 2024-12-05T00:51:55,203 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T00:51:55,203 INFO [RS:1;fea72ea5c4b6:34267 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T00:51:55,203 INFO [RS:0;fea72ea5c4b6:39187 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T00:51:55,204 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T00:51:55,204 INFO [RS:1;fea72ea5c4b6:34267 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T00:51:55,204 INFO [RS:0;fea72ea5c4b6:39187 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T00:51:55,204 INFO [RS:1;fea72ea5c4b6:34267 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T00:51:55,204 INFO [RS:0;fea72ea5c4b6:39187 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T00:51:55,204 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T00:51:55,204 INFO [RS:0;fea72ea5c4b6:39187 {}] regionserver.HRegionServer(959): stopping server fea72ea5c4b6,39187,1733359912621 2024-12-05T00:51:55,204 INFO [RS:1;fea72ea5c4b6:34267 {}] regionserver.HRegionServer(3091): Received CLOSE for 4e7968e43bb10158d4a67a8e4798895b 2024-12-05T00:51:55,204 INFO [RS:0;fea72ea5c4b6:39187 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T00:51:55,204 INFO [RS:0;fea72ea5c4b6:39187 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;fea72ea5c4b6:39187. 
2024-12-05T00:51:55,204 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34267-0x101a2f940cc0002, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:51:55,204 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39187-0x101a2f940cc0001, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:51:55,204 INFO [RS:2;fea72ea5c4b6:35921 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T00:51:55,204 DEBUG [RS:0;fea72ea5c4b6:39187 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:51:55,204 INFO [RS:2;fea72ea5c4b6:35921 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T00:51:55,204 DEBUG [RS:0;fea72ea5c4b6:39187 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:51:55,204 INFO [RS:2;fea72ea5c4b6:35921 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T00:51:55,204 INFO [RS:2;fea72ea5c4b6:35921 {}] regionserver.HRegionServer(959): stopping server fea72ea5c4b6,35921,1733359912725 2024-12-05T00:51:55,204 INFO [RS:0;fea72ea5c4b6:39187 {}] regionserver.HRegionServer(976): stopping server fea72ea5c4b6,39187,1733359912621; all regions closed. 2024-12-05T00:51:55,204 INFO [RS:2;fea72ea5c4b6:35921 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T00:51:55,204 INFO [RS:2;fea72ea5c4b6:35921 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;fea72ea5c4b6:35921. 
2024-12-05T00:51:55,204 DEBUG [RS:2;fea72ea5c4b6:35921 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:51:55,204 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T00:51:55,204 DEBUG [RS:2;fea72ea5c4b6:35921 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:51:55,205 INFO [RS:2;fea72ea5c4b6:35921 {}] regionserver.HRegionServer(976): stopping server fea72ea5c4b6,35921,1733359912725; all regions closed. 2024-12-05T00:51:55,205 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:51:55,206 INFO [RS:1;fea72ea5c4b6:34267 {}] regionserver.HRegionServer(959): stopping server fea72ea5c4b6,34267,1733359912675 2024-12-05T00:51:55,206 INFO [RS:1;fea72ea5c4b6:34267 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T00:51:55,206 INFO [RS:1;fea72ea5c4b6:34267 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;fea72ea5c4b6:34267. 2024-12-05T00:51:55,206 DEBUG [RS_CLOSE_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4e7968e43bb10158d4a67a8e4798895b, disabling compactions & flushes 2024-12-05T00:51:55,206 INFO [RS_CLOSE_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733359914174.4e7968e43bb10158d4a67a8e4798895b. 
2024-12-05T00:51:55,206 DEBUG [RS:1;fea72ea5c4b6:34267 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:51:55,206 DEBUG [RS_CLOSE_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733359914174.4e7968e43bb10158d4a67a8e4798895b. 2024-12-05T00:51:55,207 DEBUG [RS:1;fea72ea5c4b6:34267 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:51:55,207 DEBUG [RS_CLOSE_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733359914174.4e7968e43bb10158d4a67a8e4798895b. after waiting 0 ms 2024-12-05T00:51:55,207 DEBUG [RS_CLOSE_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733359914174.4e7968e43bb10158d4a67a8e4798895b. 2024-12-05T00:51:55,207 INFO [RS:1;fea72ea5c4b6:34267 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T00:51:55,207 INFO [RS:1;fea72ea5c4b6:34267 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T00:51:55,207 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:51:55,207 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:51:55,207 INFO [RS:1;fea72ea5c4b6:34267 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-05T00:51:55,207 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:51:55,207 INFO [RS:1;fea72ea5c4b6:34267 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-05T00:51:55,207 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:51:55,207 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:51:55,207 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:51:55,207 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:51:55,207 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:51:55,208 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:51:55,208 INFO [RS:1;fea72ea5c4b6:34267 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-05T00:51:55,208 DEBUG [RS:1;fea72ea5c4b6:34267 {}] regionserver.HRegionServer(1325): Online Regions={4e7968e43bb10158d4a67a8e4798895b=TestHBaseWalOnEC,,1733359914174.4e7968e43bb10158d4a67a8e4798895b., 1588230740=hbase:meta,,1.1588230740} 2024-12-05T00:51:55,208 DEBUG [RS:1;fea72ea5c4b6:34267 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 4e7968e43bb10158d4a67a8e4798895b 2024-12-05T00:51:55,208 DEBUG [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T00:51:55,208 INFO [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T00:51:55,208 DEBUG [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T00:51:55,208 DEBUG [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T00:51:55,209 DEBUG [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T00:51:55,209 INFO [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-05T00:51:55,210 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:51:55,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45923 is added to blk_1073741833_1009 (size=93) 2024-12-05T00:51:55,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42605 is added to blk_1073741833_1009 (size=93) 2024-12-05T00:51:55,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46297 is added to blk_1073741833_1009 (size=93) 2024-12-05T00:51:55,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45923 is added to blk_1073741834_1010 (size=93) 2024-12-05T00:51:55,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46297 is added to blk_1073741834_1010 (size=93) 2024-12-05T00:51:55,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42605 is added to blk_1073741834_1010 (size=93) 2024-12-05T00:51:55,225 DEBUG [RS:2;fea72ea5c4b6:35921 {}] 
wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/oldWALs 2024-12-05T00:51:55,225 INFO [RS:2;fea72ea5c4b6:35921 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog fea72ea5c4b6%2C35921%2C1733359912725:(num 1733359913496) 2024-12-05T00:51:55,225 DEBUG [RS:2;fea72ea5c4b6:35921 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:51:55,225 INFO [RS:2;fea72ea5c4b6:35921 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T00:51:55,225 INFO [RS:2;fea72ea5c4b6:35921 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T00:51:55,225 DEBUG [RS_CLOSE_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/default/TestHBaseWalOnEC/4e7968e43bb10158d4a67a8e4798895b/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-05T00:51:55,225 INFO [RS:2;fea72ea5c4b6:35921 {}] hbase.ChoreService(370): Chore service for: regionserver/fea72ea5c4b6:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T00:51:55,225 INFO [RS:2;fea72ea5c4b6:35921 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T00:51:55,225 INFO [RS:2;fea72ea5c4b6:35921 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T00:51:55,225 INFO [regionserver/fea72ea5c4b6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-05T00:51:55,225 INFO [RS:2;fea72ea5c4b6:35921 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-05T00:51:55,225 INFO [RS:2;fea72ea5c4b6:35921 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T00:51:55,226 INFO [RS:2;fea72ea5c4b6:35921 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35921 2024-12-05T00:51:55,227 INFO [RS_CLOSE_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733359914174.4e7968e43bb10158d4a67a8e4798895b. 2024-12-05T00:51:55,227 DEBUG [RS_CLOSE_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4e7968e43bb10158d4a67a8e4798895b: Waiting for close lock at 1733359915206Running coprocessor pre-close hooks at 1733359915206Disabling compacts and flushes for region at 1733359915206Disabling writes for close at 1733359915207 (+1 ms)Writing region close event to WAL at 1733359915210 (+3 ms)Running coprocessor post-close hooks at 1733359915227 (+17 ms)Closed at 1733359915227 2024-12-05T00:51:55,228 DEBUG [RS_CLOSE_REGION-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733359914174.4e7968e43bb10158d4a67a8e4798895b. 
2024-12-05T00:51:55,228 DEBUG [RS:0;fea72ea5c4b6:39187 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/oldWALs 2024-12-05T00:51:55,228 INFO [RS:0;fea72ea5c4b6:39187 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog fea72ea5c4b6%2C39187%2C1733359912621:(num 1733359913504) 2024-12-05T00:51:55,228 DEBUG [RS:0;fea72ea5c4b6:39187 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:51:55,228 INFO [RS:0;fea72ea5c4b6:39187 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T00:51:55,228 INFO [RS:0;fea72ea5c4b6:39187 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T00:51:55,229 INFO [RS:0;fea72ea5c4b6:39187 {}] hbase.ChoreService(370): Chore service for: regionserver/fea72ea5c4b6:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T00:51:55,229 INFO [RS:0;fea72ea5c4b6:39187 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T00:51:55,229 INFO [RS:0;fea72ea5c4b6:39187 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T00:51:55,229 INFO [RS:0;fea72ea5c4b6:39187 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-05T00:51:55,229 INFO [RS:0;fea72ea5c4b6:39187 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T00:51:55,229 INFO [RS:0;fea72ea5c4b6:39187 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39187 2024-12-05T00:51:55,229 INFO [regionserver/fea72ea5c4b6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-05T00:51:55,235 DEBUG [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/hbase/meta/1588230740/.tmp/info/933c2ded4e3a4aeb82986f788f67f2af is 153, key is TestHBaseWalOnEC,,1733359914174.4e7968e43bb10158d4a67a8e4798895b./info:regioninfo/1733359914552/Put/seqid=0 2024-12-05T00:51:55,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45923 is added to blk_1073741840_1016 (size=6637) 2024-12-05T00:51:55,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42605 is added to blk_1073741840_1016 (size=6637) 2024-12-05T00:51:55,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46297 is added to blk_1073741840_1016 (size=6637) 2024-12-05T00:51:55,243 INFO [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/hbase/meta/1588230740/.tmp/info/933c2ded4e3a4aeb82986f788f67f2af 2024-12-05T00:51:55,246 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x101a2f940cc0003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/fea72ea5c4b6,35921,1733359912725 2024-12-05T00:51:55,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T00:51:55,247 INFO [RS:2;fea72ea5c4b6:35921 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T00:51:55,247 INFO [regionserver/fea72ea5c4b6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T00:51:55,257 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39187-0x101a2f940cc0001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/fea72ea5c4b6,39187,1733359912621 2024-12-05T00:51:55,257 INFO [RS:0;fea72ea5c4b6:39187 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T00:51:55,257 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [fea72ea5c4b6,39187,1733359912621] 2024-12-05T00:51:55,257 ERROR [pool-324-thread-1-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$373/0x00007f54ec8f8000@6944a5c2 rejected from java.util.concurrent.ThreadPoolExecutor@6d7b3c32[Shutting down, pool size = 1, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1360) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] 
at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-12-05T00:51:55,262 INFO [regionserver/fea72ea5c4b6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T00:51:55,265 DEBUG [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/hbase/meta/1588230740/.tmp/ns/aca53800ef9e4712abcc38fb51845627 is 43, key is default/ns:d/1733359914111/Put/seqid=0 2024-12-05T00:51:55,267 INFO [regionserver/fea72ea5c4b6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T00:51:55,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46297 is added to blk_1073741841_1017 (size=5153) 2024-12-05T00:51:55,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45923 is added to blk_1073741841_1017 (size=5153) 2024-12-05T00:51:55,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42605 is added to blk_1073741841_1017 (size=5153) 2024-12-05T00:51:55,274 INFO [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/hbase/meta/1588230740/.tmp/ns/aca53800ef9e4712abcc38fb51845627 2024-12-05T00:51:55,278 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/fea72ea5c4b6,39187,1733359912621 already deleted, retry=false 2024-12-05T00:51:55,278 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; fea72ea5c4b6,39187,1733359912621 expired; onlineServers=2 2024-12-05T00:51:55,278 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [fea72ea5c4b6,35921,1733359912725] 2024-12-05T00:51:55,289 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/fea72ea5c4b6,35921,1733359912725 already deleted, retry=false 2024-12-05T00:51:55,289 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; fea72ea5c4b6,35921,1733359912725 expired; onlineServers=1 2024-12-05T00:51:55,295 DEBUG [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/hbase/meta/1588230740/.tmp/table/49dd06c451db4dafbe9645c3d5dcf870 is 52, key is TestHBaseWalOnEC/table:state/1733359914569/Put/seqid=0 2024-12-05T00:51:55,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45923 is added to blk_1073741842_1018 (size=5249) 2024-12-05T00:51:55,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42605 is added to blk_1073741842_1018 (size=5249) 2024-12-05T00:51:55,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:46297 is added to blk_1073741842_1018 (size=5249) 2024-12-05T00:51:55,303 INFO [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/hbase/meta/1588230740/.tmp/table/49dd06c451db4dafbe9645c3d5dcf870 2024-12-05T00:51:55,310 DEBUG [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/hbase/meta/1588230740/.tmp/info/933c2ded4e3a4aeb82986f788f67f2af as hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/hbase/meta/1588230740/info/933c2ded4e3a4aeb82986f788f67f2af 2024-12-05T00:51:55,318 INFO [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/hbase/meta/1588230740/info/933c2ded4e3a4aeb82986f788f67f2af, entries=10, sequenceid=11, filesize=6.5 K 2024-12-05T00:51:55,319 DEBUG [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/hbase/meta/1588230740/.tmp/ns/aca53800ef9e4712abcc38fb51845627 as hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/hbase/meta/1588230740/ns/aca53800ef9e4712abcc38fb51845627 2024-12-05T00:51:55,326 INFO [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/hbase/meta/1588230740/ns/aca53800ef9e4712abcc38fb51845627, entries=2, sequenceid=11, filesize=5.0 K 2024-12-05T00:51:55,328 DEBUG [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/hbase/meta/1588230740/.tmp/table/49dd06c451db4dafbe9645c3d5dcf870 as hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/hbase/meta/1588230740/table/49dd06c451db4dafbe9645c3d5dcf870 2024-12-05T00:51:55,338 INFO [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/hbase/meta/1588230740/table/49dd06c451db4dafbe9645c3d5dcf870, entries=2, sequenceid=11, filesize=5.1 K 2024-12-05T00:51:55,340 INFO [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 131ms, sequenceid=11, compaction requested=false 2024-12-05T00:51:55,349 DEBUG [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-05T00:51:55,350 DEBUG [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] 
coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T00:51:55,350 INFO [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T00:51:55,350 DEBUG [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733359915208Running coprocessor pre-close hooks at 1733359915208Disabling compacts and flushes for region at 1733359915208Disabling writes for close at 1733359915209 (+1 ms)Obtaining lock to block concurrent updates at 1733359915209Preparing flush snapshotting stores in 1588230740 at 1733359915209Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733359915209Flushing stores of hbase:meta,,1.1588230740 at 1733359915211 (+2 ms)Flushing 1588230740/info: creating writer at 1733359915211Flushing 1588230740/info: appending metadata at 1733359915234 (+23 ms)Flushing 1588230740/info: closing flushed file at 1733359915234Flushing 1588230740/ns: creating writer at 1733359915250 (+16 ms)Flushing 1588230740/ns: appending metadata at 1733359915264 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733359915264Flushing 1588230740/table: creating writer at 1733359915282 (+18 ms)Flushing 1588230740/table: appending metadata at 1733359915295 (+13 ms)Flushing 1588230740/table: closing flushed file at 1733359915295Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3c1c46b4: reopening flushed file at 1733359915309 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@67165bce: reopening flushed file at 1733359915318 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@28602153: reopening flushed file at 1733359915327 (+9 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 131ms, sequenceid=11, compaction requested=false at 1733359915340 (+13 ms)Writing region close event to WAL at 1733359915343 (+3 ms)Running coprocessor post-close hooks at 1733359915350 (+7 ms)Closed at 1733359915350 2024-12-05T00:51:55,351 DEBUG [RS_CLOSE_META-regionserver/fea72ea5c4b6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-05T00:51:55,362 INFO [regionserver/fea72ea5c4b6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-05T00:51:55,362 INFO [regionserver/fea72ea5c4b6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-05T00:51:55,368 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x101a2f940cc0003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:51:55,368 INFO [RS:2;fea72ea5c4b6:35921 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T00:51:55,368 INFO [RS:2;fea72ea5c4b6:35921 {}] regionserver.HRegionServer(1031): Exiting; stopping=fea72ea5c4b6,35921,1733359912725; zookeeper connection closed. 
2024-12-05T00:51:55,368 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x101a2f940cc0003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:51:55,368 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7a9c7893 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7a9c7893 2024-12-05T00:51:55,379 INFO [RS:0;fea72ea5c4b6:39187 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T00:51:55,379 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39187-0x101a2f940cc0001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:51:55,379 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39187-0x101a2f940cc0001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:51:55,379 INFO [RS:0;fea72ea5c4b6:39187 {}] regionserver.HRegionServer(1031): Exiting; stopping=fea72ea5c4b6,39187,1733359912621; zookeeper connection closed. 2024-12-05T00:51:55,379 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@14e25712 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@14e25712 2024-12-05T00:51:55,408 INFO [RS:1;fea72ea5c4b6:34267 {}] regionserver.HRegionServer(976): stopping server fea72ea5c4b6,34267,1733359912675; all regions closed. 2024-12-05T00:51:55,409 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:51:55,409 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:51:55,409 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:51:55,409 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:51:55,409 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:51:55,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45923 is added to blk_1073741836_1012 (size=2751) 2024-12-05T00:51:55,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42605 is added to blk_1073741836_1012 (size=2751) 2024-12-05T00:51:55,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46297 is added to blk_1073741836_1012 (size=2751) 2024-12-05T00:51:55,417 DEBUG [RS:1;fea72ea5c4b6:34267 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/oldWALs 2024-12-05T00:51:55,417 INFO [RS:1;fea72ea5c4b6:34267 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog fea72ea5c4b6%2C34267%2C1733359912675.meta:.meta(num 1733359913969) 2024-12-05T00:51:55,417 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:51:55,418 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:51:55,418 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:51:55,418 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:51:55,418 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:51:55,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45923 is added to blk_1073741835_1011 (size=1298) 2024-12-05T00:51:55,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:46297 is added to blk_1073741835_1011 (size=1298) 2024-12-05T00:51:55,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42605 is added to blk_1073741835_1011 (size=1298) 2024-12-05T00:51:55,441 DEBUG [RS:1;fea72ea5c4b6:34267 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/oldWALs 2024-12-05T00:51:55,441 INFO [RS:1;fea72ea5c4b6:34267 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog fea72ea5c4b6%2C34267%2C1733359912675:(num 1733359913512) 2024-12-05T00:51:55,441 DEBUG [RS:1;fea72ea5c4b6:34267 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:51:55,441 INFO [RS:1;fea72ea5c4b6:34267 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T00:51:55,441 INFO [RS:1;fea72ea5c4b6:34267 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T00:51:55,442 INFO [RS:1;fea72ea5c4b6:34267 {}] hbase.ChoreService(370): Chore service for: regionserver/fea72ea5c4b6:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T00:51:55,442 INFO [RS:1;fea72ea5c4b6:34267 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T00:51:55,442 INFO [RS:1;fea72ea5c4b6:34267 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34267 2024-12-05T00:51:55,442 INFO [regionserver/fea72ea5c4b6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-05T00:51:55,457 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34267-0x101a2f940cc0002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/fea72ea5c4b6,34267,1733359912675 2024-12-05T00:51:55,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T00:51:55,457 INFO [RS:1;fea72ea5c4b6:34267 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T00:51:55,458 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [fea72ea5c4b6,34267,1733359912675] 2024-12-05T00:51:55,478 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/fea72ea5c4b6,34267,1733359912675 already deleted, retry=false 2024-12-05T00:51:55,478 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; fea72ea5c4b6,34267,1733359912675 expired; onlineServers=0 2024-12-05T00:51:55,478 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'fea72ea5c4b6,32923,1733359912257' ***** 2024-12-05T00:51:55,478 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-05T00:51:55,479 INFO [M:0;fea72ea5c4b6:32923 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T00:51:55,479 INFO [M:0;fea72ea5c4b6:32923 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T00:51:55,479 DEBUG [M:0;fea72ea5c4b6:32923 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-05T00:51:55,479 DEBUG [M:0;fea72ea5c4b6:32923 {}] cleaner.HFileCleaner(335): Stopping file delete threads 
2024-12-05T00:51:55,479 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster-HFileCleaner.large.0-1733359913156 {}] cleaner.HFileCleaner(306): Exit Thread[master/fea72ea5c4b6:0:becomeActiveMaster-HFileCleaner.large.0-1733359913156,5,FailOnTimeoutGroup] 2024-12-05T00:51:55,479 INFO [M:0;fea72ea5c4b6:32923 {}] hbase.ChoreService(370): Chore service for: master/fea72ea5c4b6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-05T00:51:55,479 INFO [M:0;fea72ea5c4b6:32923 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T00:51:55,479 DEBUG [M:0;fea72ea5c4b6:32923 {}] master.HMaster(1795): Stopping service threads 2024-12-05T00:51:55,479 INFO [M:0;fea72ea5c4b6:32923 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-05T00:51:55,479 INFO [M:0;fea72ea5c4b6:32923 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T00:51:55,480 INFO [M:0;fea72ea5c4b6:32923 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-05T00:51:55,480 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-05T00:51:55,480 DEBUG [master/fea72ea5c4b6:0:becomeActiveMaster-HFileCleaner.small.0-1733359913156 {}] cleaner.HFileCleaner(306): Exit Thread[master/fea72ea5c4b6:0:becomeActiveMaster-HFileCleaner.small.0-1733359913156,5,FailOnTimeoutGroup] 2024-12-05T00:51:55,480 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-05T00:51:55,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-05T00:51:55,489 DEBUG [M:0;fea72ea5c4b6:32923 {}] zookeeper.ZKUtil(347): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-05T00:51:55,489 WARN [M:0;fea72ea5c4b6:32923 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-05T00:51:55,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:51:55,493 INFO [M:0;fea72ea5c4b6:32923 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/.lastflushedseqids 2024-12-05T00:51:55,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46297 is added to blk_1073741843_1019 (size=127) 2024-12-05T00:51:55,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42605 is added to blk_1073741843_1019 (size=127) 2024-12-05T00:51:55,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45923 is added to blk_1073741843_1019 (size=127) 2024-12-05T00:51:55,568 INFO [RS:1;fea72ea5c4b6:34267 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T00:51:55,568 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34267-0x101a2f940cc0002, quorum=127.0.0.1:59194, 
baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:51:55,568 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34267-0x101a2f940cc0002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:51:55,568 INFO [RS:1;fea72ea5c4b6:34267 {}] regionserver.HRegionServer(1031): Exiting; stopping=fea72ea5c4b6,34267,1733359912675; zookeeper connection closed. 2024-12-05T00:51:55,568 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@14b17c5f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@14b17c5f 2024-12-05T00:51:55,569 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-05T00:51:55,922 INFO [M:0;fea72ea5c4b6:32923 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-05T00:51:55,922 INFO [M:0;fea72ea5c4b6:32923 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-05T00:51:55,922 DEBUG [M:0;fea72ea5c4b6:32923 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T00:51:55,922 INFO [M:0;fea72ea5c4b6:32923 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:51:55,922 DEBUG [M:0;fea72ea5c4b6:32923 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:51:55,922 DEBUG [M:0;fea72ea5c4b6:32923 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T00:51:55,922 DEBUG [M:0;fea72ea5c4b6:32923 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-05T00:51:55,922 INFO [M:0;fea72ea5c4b6:32923 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.85 KB heapSize=34.13 KB 2024-12-05T00:51:55,938 DEBUG [M:0;fea72ea5c4b6:32923 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7fbe55d5cd134eedb6ea640135545488 is 82, key is hbase:meta,,1/info:regioninfo/1733359914014/Put/seqid=0 2024-12-05T00:51:55,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46297 is added to blk_1073741844_1020 (size=5672) 2024-12-05T00:51:55,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42605 is added to blk_1073741844_1020 (size=5672) 2024-12-05T00:51:55,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45923 is added to blk_1073741844_1020 (size=5672) 2024-12-05T00:51:55,945 INFO [M:0;fea72ea5c4b6:32923 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7fbe55d5cd134eedb6ea640135545488 2024-12-05T00:51:55,971 DEBUG [M:0;fea72ea5c4b6:32923 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/68600210d6164982b6a05fd52c8dd6da is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733359914577/Put/seqid=0 2024-12-05T00:51:55,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46297 is added to blk_1073741845_1021 (size=6441) 2024-12-05T00:51:55,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45923 is added to blk_1073741845_1021 (size=6441) 2024-12-05T00:51:55,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42605 is added to blk_1073741845_1021 (size=6441) 2024-12-05T00:51:55,979 INFO [M:0;fea72ea5c4b6:32923 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.17 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/68600210d6164982b6a05fd52c8dd6da 2024-12-05T00:51:55,998 DEBUG [M:0;fea72ea5c4b6:32923 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/52ec4595ec284daba4651e39e1788f67 is 69, key is fea72ea5c4b6,34267,1733359912675/rs:state/1733359913241/Put/seqid=0 2024-12-05T00:51:56,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42605 is added to blk_1073741846_1022 (size=5294) 2024-12-05T00:51:56,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45923 is added to blk_1073741846_1022 (size=5294) 2024-12-05T00:51:56,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:46297 is added to blk_1073741846_1022 (size=5294) 2024-12-05T00:51:56,005 INFO [M:0;fea72ea5c4b6:32923 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/52ec4595ec284daba4651e39e1788f67 2024-12-05T00:51:56,011 DEBUG [M:0;fea72ea5c4b6:32923 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7fbe55d5cd134eedb6ea640135545488 as hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7fbe55d5cd134eedb6ea640135545488 2024-12-05T00:51:56,016 INFO [M:0;fea72ea5c4b6:32923 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7fbe55d5cd134eedb6ea640135545488, entries=8, sequenceid=72, filesize=5.5 K 2024-12-05T00:51:56,017 DEBUG [M:0;fea72ea5c4b6:32923 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/68600210d6164982b6a05fd52c8dd6da as hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/68600210d6164982b6a05fd52c8dd6da 2024-12-05T00:51:56,024 INFO [M:0;fea72ea5c4b6:32923 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/68600210d6164982b6a05fd52c8dd6da, entries=8, sequenceid=72, filesize=6.3 K 2024-12-05T00:51:56,025 DEBUG [M:0;fea72ea5c4b6:32923 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/52ec4595ec284daba4651e39e1788f67 as hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/52ec4595ec284daba4651e39e1788f67 2024-12-05T00:51:56,031 INFO [M:0;fea72ea5c4b6:32923 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34663/user/jenkins/test-data/42794fb4-dd0a-b773-ed7b-7b532ccf0eb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/52ec4595ec284daba4651e39e1788f67, entries=3, sequenceid=72, filesize=5.2 K 2024-12-05T00:51:56,033 INFO [M:0;fea72ea5c4b6:32923 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.85 KB/27492, heapSize ~33.84 KB/34648, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=72, compaction requested=false 2024-12-05T00:51:56,034 INFO [M:0;fea72ea5c4b6:32923 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-05T00:51:56,034 DEBUG [M:0;fea72ea5c4b6:32923 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733359915922Disabling compacts and flushes for region at 1733359915922Disabling writes for close at 1733359915922Obtaining lock to block concurrent updates at 1733359915922Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733359915922Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27492, getHeapSize=34888, getOffHeapSize=0, getCellsCount=85 at 1733359915923 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733359915924 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733359915924Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733359915938 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733359915938Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733359915952 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733359915971 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733359915971Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733359915985 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733359915998 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733359915998Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1eb2687a: reopening flushed file at 1733359916010 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@c4f3fd3: reopening flushed file at 1733359916016 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5b2500c5: reopening flushed file at 1733359916024 (+8 ms)Finished flush of dataSize ~26.85 KB/27492, heapSize ~33.84 KB/34648, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=72, compaction requested=false at 1733359916033 (+9 ms)Writing region close event to WAL at 1733359916034 (+1 ms)Closed at 1733359916034 2024-12-05T00:51:56,035 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:51:56,035 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:51:56,035 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:51:56,035 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:51:56,035 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T00:51:56,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46297 is added to blk_1073741830_1006 (size=32695) 2024-12-05T00:51:56,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45923 is added to blk_1073741830_1006 (size=32695) 2024-12-05T00:51:56,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42605 is added to blk_1073741830_1006 (size=32695) 2024-12-05T00:51:56,038 INFO [M:0;fea72ea5c4b6:32923 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-05T00:51:56,038 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-05T00:51:56,038 INFO [M:0;fea72ea5c4b6:32923 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32923 2024-12-05T00:51:56,039 INFO [M:0;fea72ea5c4b6:32923 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T00:51:56,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:51:56,186 INFO [M:0;fea72ea5c4b6:32923 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T00:51:56,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32923-0x101a2f940cc0000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T00:51:56,226 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2735ddad{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:51:56,226 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@387982f9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:51:56,226 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:51:56,227 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@67530df7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:51:56,227 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65c795a1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/hadoop.log.dir/,STOPPED} 2024-12-05T00:51:56,228 WARN [BP-1172660541-172.17.0.2-1733359909120 heartbeating to localhost/127.0.0.1:34663 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T00:51:56,228 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T00:51:56,228 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T00:51:56,228 WARN [BP-1172660541-172.17.0.2-1733359909120 heartbeating to localhost/127.0.0.1:34663 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1172660541-172.17.0.2-1733359909120 (Datanode Uuid 0bb39949-6f35-4273-82fb-1d105e28b9d2) service to localhost/127.0.0.1:34663 2024-12-05T00:51:56,229 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/cluster_a83fee05-1ec3-a553-1a88-5d67517fd4a7/data/data5/current/BP-1172660541-172.17.0.2-1733359909120 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:51:56,229 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/cluster_a83fee05-1ec3-a553-1a88-5d67517fd4a7/data/data6/current/BP-1172660541-172.17.0.2-1733359909120 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:51:56,229 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T00:51:56,237 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62356e52{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:51:56,237 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@259f19e7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:51:56,237 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:51:56,237 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@58778b20{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:51:56,238 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45e96b0c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/hadoop.log.dir/,STOPPED} 2024-12-05T00:51:56,239 WARN [BP-1172660541-172.17.0.2-1733359909120 heartbeating to localhost/127.0.0.1:34663 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T00:51:56,239 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T00:51:56,239 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T00:51:56,239 WARN [BP-1172660541-172.17.0.2-1733359909120 heartbeating to localhost/127.0.0.1:34663 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1172660541-172.17.0.2-1733359909120 (Datanode Uuid e9305cf5-98e7-4f7c-8bef-c4fd47e37393) service to localhost/127.0.0.1:34663 2024-12-05T00:51:56,240 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/cluster_a83fee05-1ec3-a553-1a88-5d67517fd4a7/data/data3/current/BP-1172660541-172.17.0.2-1733359909120 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:51:56,240 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/cluster_a83fee05-1ec3-a553-1a88-5d67517fd4a7/data/data4/current/BP-1172660541-172.17.0.2-1733359909120 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:51:56,240 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T00:51:56,249 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@776d8022{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:51:56,250 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5ffd79d0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:51:56,250 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:51:56,250 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@592a8291{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:51:56,250 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45d5becd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/hadoop.log.dir/,STOPPED} 2024-12-05T00:51:56,252 WARN [BP-1172660541-172.17.0.2-1733359909120 heartbeating to localhost/127.0.0.1:34663 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T00:51:56,252 WARN [BP-1172660541-172.17.0.2-1733359909120 heartbeating to localhost/127.0.0.1:34663 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1172660541-172.17.0.2-1733359909120 (Datanode Uuid b223ef43-9ccf-41d0-9f38-6606b0c952ff) service to localhost/127.0.0.1:34663 2024-12-05T00:51:56,253 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/cluster_a83fee05-1ec3-a553-1a88-5d67517fd4a7/data/data1/current/BP-1172660541-172.17.0.2-1733359909120 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:51:56,253 WARN 
[refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/cluster_a83fee05-1ec3-a553-1a88-5d67517fd4a7/data/data2/current/BP-1172660541-172.17.0.2-1733359909120 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T00:51:56,253 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-05T00:51:56,253 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T00:51:56,253 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T00:51:56,259 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7826e44{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-05T00:51:56,260 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@69e41ac7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T00:51:56,260 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T00:51:56,260 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c410e4c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T00:51:56,260 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7005d2e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5870f041-08d8-9faa-717e-ae8b54779b83/hadoop.log.dir/,STOPPED} 2024-12-05T00:51:56,268 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-05T00:51:56,302 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-05T00:51:56,309 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=147 (was 87) - Thread LEAK? -, OpenFileDescriptor=518 (was 441) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=667 (was 679), ProcessCount=11 (was 11), AvailableMemoryMB=10207 (was 10836)