2024-11-10 15:15:25,484 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-11-10 15:15:25,495 main DEBUG Took 0.009391 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-10 15:15:25,495 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-10 15:15:25,496 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-10 15:15:25,496 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-10 15:15:25,498 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 15:15:25,506 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-10 15:15:25,525 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 15:15:25,527 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 15:15:25,527 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 15:15:25,528 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 15:15:25,528 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 15:15:25,528 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 15:15:25,529 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 15:15:25,529 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 15:15:25,530 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 15:15:25,530 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 15:15:25,531 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 15:15:25,531 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 15:15:25,532 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 15:15:25,532 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-10 15:15:25,532 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 15:15:25,533 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 15:15:25,533 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 15:15:25,533 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 15:15:25,534 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 15:15:25,534 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 15:15:25,534 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 15:15:25,535 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 15:15:25,535 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 15:15:25,535 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 15:15:25,536 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 15:15:25,536 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-10 15:15:25,538 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 15:15:25,539 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-10 15:15:25,541 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-10 15:15:25,541 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-10 15:15:25,542 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-10 15:15:25,542 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-10 15:15:25,551 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-10 15:15:25,554 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-10 15:15:25,555 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-10 15:15:25,556 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-10 15:15:25,556 main DEBUG createAppenders(={Console}) 2024-11-10 15:15:25,557 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-11-10 15:15:25,557 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-11-10 15:15:25,557 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-11-10 15:15:25,558 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-10 15:15:25,558 main DEBUG OutputStream closed 2024-11-10 15:15:25,558 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-10 15:15:25,559 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-10 15:15:25,559 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-11-10 15:15:25,625 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-10 15:15:25,627 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-10 15:15:25,628 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-10 15:15:25,629 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-10 15:15:25,630 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-10 15:15:25,630 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-10 15:15:25,630 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-10 15:15:25,631 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-10 15:15:25,631 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-10 15:15:25,631 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-10 15:15:25,631 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-10 15:15:25,632 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-10 15:15:25,632 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-10 15:15:25,632 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-10 15:15:25,633 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-10 15:15:25,633 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-10 15:15:25,633 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-10 15:15:25,634 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-10 15:15:25,636 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-10 15:15:25,637 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-11-10 15:15:25,637 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-10 15:15:25,638 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-11-10T15:15:25,654 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-11-10 15:15:25,657 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-10 15:15:25,657 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-10T15:15:25,930 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0 2024-11-10T15:15:25,956 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/cluster_32013011-b160-e423-334f-0f3046a2abc0, deleteOnExit=true 2024-11-10T15:15:25,958 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/test.cache.data in system properties and HBase conf 2024-11-10T15:15:25,958 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/hadoop.tmp.dir in system properties and HBase conf 2024-11-10T15:15:25,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/hadoop.log.dir in system properties and HBase conf 2024-11-10T15:15:25,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-10T15:15:25,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-10T15:15:25,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-10T15:15:26,064 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-10T15:15:26,163 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-10T15:15:26,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-10T15:15:26,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-10T15:15:26,168 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-10T15:15:26,169 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T15:15:26,169 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-10T15:15:26,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-10T15:15:26,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T15:15:26,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T15:15:26,172 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-10T15:15:26,173 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/nfs.dump.dir in system properties and HBase conf 2024-11-10T15:15:26,173 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/java.io.tmpdir in system properties and HBase conf 2024-11-10T15:15:26,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T15:15:26,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-10T15:15:26,175 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-10T15:15:26,991 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-10T15:15:27,078 INFO [Time-limited test {}] log.Log(170): Logging initialized @2295ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-10T15:15:27,159 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T15:15:27,227 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T15:15:27,249 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T15:15:27,249 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T15:15:27,250 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T15:15:27,263 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T15:15:27,265 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@760c69c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/hadoop.log.dir/,AVAILABLE} 2024-11-10T15:15:27,266 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ce709a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T15:15:27,467 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62d6efd9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/java.io.tmpdir/jetty-localhost-35819-hadoop-hdfs-3_4_1-tests_jar-_-any-15736636116561856540/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T15:15:27,476 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@353d35a1{HTTP/1.1, (http/1.1)}{localhost:35819} 2024-11-10T15:15:27,476 INFO [Time-limited test {}] server.Server(415): Started @2694ms 2024-11-10T15:15:27,872 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T15:15:27,878 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T15:15:27,879 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T15:15:27,880 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T15:15:27,880 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T15:15:27,881 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a5de9e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/hadoop.log.dir/,AVAILABLE} 2024-11-10T15:15:27,881 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69893329{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T15:15:28,002 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1b97a472{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/java.io.tmpdir/jetty-localhost-36241-hadoop-hdfs-3_4_1-tests_jar-_-any-349546545541691251/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T15:15:28,002 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@3722a29b{HTTP/1.1, (http/1.1)}{localhost:36241} 2024-11-10T15:15:28,003 INFO [Time-limited test {}] server.Server(415): Started @3221ms 2024-11-10T15:15:28,059 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T15:15:28,176 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T15:15:28,183 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T15:15:28,185 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T15:15:28,185 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T15:15:28,186 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T15:15:28,186 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@510fec09{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/hadoop.log.dir/,AVAILABLE} 2024-11-10T15:15:28,187 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40eb7053{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T15:15:28,307 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@353955e9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/java.io.tmpdir/jetty-localhost-37777-hadoop-hdfs-3_4_1-tests_jar-_-any-16301040298515339448/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T15:15:28,308 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11738cd8{HTTP/1.1, (http/1.1)}{localhost:37777} 2024-11-10T15:15:28,308 INFO [Time-limited test {}] server.Server(415): Started @3526ms 2024-11-10T15:15:28,310 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T15:15:28,350 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T15:15:28,355 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T15:15:28,356 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T15:15:28,356 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T15:15:28,357 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T15:15:28,358 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16cd567f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/hadoop.log.dir/,AVAILABLE} 2024-11-10T15:15:28,358 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5822645a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T15:15:28,488 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3114ae69{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/java.io.tmpdir/jetty-localhost-37149-hadoop-hdfs-3_4_1-tests_jar-_-any-7723678921691644180/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T15:15:28,489 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3c70a874{HTTP/1.1, (http/1.1)}{localhost:37149} 2024-11-10T15:15:28,489 INFO [Time-limited test {}] server.Server(415): Started @3708ms 2024-11-10T15:15:28,492 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-10T15:15:28,504 WARN [Thread-111 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/cluster_32013011-b160-e423-334f-0f3046a2abc0/data/data4/current/BP-100759265-172.17.0.2-1731251726743/current, will proceed with Du for space computation calculation, 2024-11-10T15:15:28,504 WARN [Thread-109 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/cluster_32013011-b160-e423-334f-0f3046a2abc0/data/data2/current/BP-100759265-172.17.0.2-1731251726743/current, will proceed with Du for space computation calculation, 2024-11-10T15:15:28,504 WARN [Thread-110 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/cluster_32013011-b160-e423-334f-0f3046a2abc0/data/data3/current/BP-100759265-172.17.0.2-1731251726743/current, will proceed with Du for space computation calculation, 2024-11-10T15:15:28,504 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/cluster_32013011-b160-e423-334f-0f3046a2abc0/data/data1/current/BP-100759265-172.17.0.2-1731251726743/current, will proceed with Du for space computation calculation, 2024-11-10T15:15:28,558 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T15:15:28,570 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-10T15:15:28,622 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/cluster_32013011-b160-e423-334f-0f3046a2abc0/data/data5/current/BP-100759265-172.17.0.2-1731251726743/current, will proceed with Du for space computation calculation, 2024-11-10T15:15:28,623 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/cluster_32013011-b160-e423-334f-0f3046a2abc0/data/data6/current/BP-100759265-172.17.0.2-1731251726743/current, will proceed with Du for space computation calculation, 2024-11-10T15:15:28,630 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x541d7cfd5b58c298 with lease ID 0x10aca27865de4266: Processing first storage report for DS-ed820ed7-b28c-49de-9cbc-56049bfbd4bf from datanode DatanodeRegistration(127.0.0.1:42089, datanodeUuid=a2216ef1-02f6-440c-baa5-89e80dde5fef, infoPort=35317, infoSecurePort=0, ipcPort=39219, storageInfo=lv=-57;cid=testClusterID;nsid=523539292;c=1731251726743) 2024-11-10T15:15:28,631 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x541d7cfd5b58c298 with lease ID 0x10aca27865de4266: from storage DS-ed820ed7-b28c-49de-9cbc-56049bfbd4bf node DatanodeRegistration(127.0.0.1:42089, datanodeUuid=a2216ef1-02f6-440c-baa5-89e80dde5fef, infoPort=35317, infoSecurePort=0, ipcPort=39219, storageInfo=lv=-57;cid=testClusterID;nsid=523539292;c=1731251726743), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-10T15:15:28,632 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8542cb6154c71a6f with lease ID 0x10aca27865de4265: Processing first storage report for DS-4c118f26-e15e-43d2-b444-b1d8dc9964c7 from datanode DatanodeRegistration(127.0.0.1:39423, datanodeUuid=2e40fac8-c13e-486f-8bc7-17b44f8dce3f, infoPort=36381, infoSecurePort=0, ipcPort=41547, storageInfo=lv=-57;cid=testClusterID;nsid=523539292;c=1731251726743) 2024-11-10T15:15:28,632 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8542cb6154c71a6f with lease ID 0x10aca27865de4265: from storage DS-4c118f26-e15e-43d2-b444-b1d8dc9964c7 node DatanodeRegistration(127.0.0.1:39423, datanodeUuid=2e40fac8-c13e-486f-8bc7-17b44f8dce3f, infoPort=36381, infoSecurePort=0, ipcPort=41547, storageInfo=lv=-57;cid=testClusterID;nsid=523539292;c=1731251726743), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T15:15:28,632 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x541d7cfd5b58c298 with lease ID 0x10aca27865de4266: Processing first storage report for DS-fff39d96-163c-4483-950a-3689d4c624fc from datanode DatanodeRegistration(127.0.0.1:42089, datanodeUuid=a2216ef1-02f6-440c-baa5-89e80dde5fef, infoPort=35317, infoSecurePort=0, ipcPort=39219, storageInfo=lv=-57;cid=testClusterID;nsid=523539292;c=1731251726743) 2024-11-10T15:15:28,633 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x541d7cfd5b58c298 with lease ID 0x10aca27865de4266: from storage DS-fff39d96-163c-4483-950a-3689d4c624fc node DatanodeRegistration(127.0.0.1:42089, datanodeUuid=a2216ef1-02f6-440c-baa5-89e80dde5fef, 
infoPort=35317, infoSecurePort=0, ipcPort=39219, storageInfo=lv=-57;cid=testClusterID;nsid=523539292;c=1731251726743), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-10T15:15:28,633 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8542cb6154c71a6f with lease ID 0x10aca27865de4265: Processing first storage report for DS-cc233893-47b3-4ff1-8bd8-e94f19fc268d from datanode DatanodeRegistration(127.0.0.1:39423, datanodeUuid=2e40fac8-c13e-486f-8bc7-17b44f8dce3f, infoPort=36381, infoSecurePort=0, ipcPort=41547, storageInfo=lv=-57;cid=testClusterID;nsid=523539292;c=1731251726743) 2024-11-10T15:15:28,633 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8542cb6154c71a6f with lease ID 0x10aca27865de4265: from storage DS-cc233893-47b3-4ff1-8bd8-e94f19fc268d node DatanodeRegistration(127.0.0.1:39423, datanodeUuid=2e40fac8-c13e-486f-8bc7-17b44f8dce3f, infoPort=36381, infoSecurePort=0, ipcPort=41547, storageInfo=lv=-57;cid=testClusterID;nsid=523539292;c=1731251726743), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T15:15:28,655 WARN [Thread-116 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T15:15:28,660 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcab209aba4a35b2b with lease ID 0x10aca27865de4267: Processing first storage report for DS-758158f8-9bf2-43ac-8f3a-ca87c04a668c from datanode DatanodeRegistration(127.0.0.1:43119, datanodeUuid=327d567e-824f-4be2-b360-cac504a36acf, infoPort=44377, infoSecurePort=0, ipcPort=34615, storageInfo=lv=-57;cid=testClusterID;nsid=523539292;c=1731251726743) 2024-11-10T15:15:28,660 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcab209aba4a35b2b with lease ID 0x10aca27865de4267: from storage DS-758158f8-9bf2-43ac-8f3a-ca87c04a668c node DatanodeRegistration(127.0.0.1:43119, datanodeUuid=327d567e-824f-4be2-b360-cac504a36acf, infoPort=44377, infoSecurePort=0, ipcPort=34615, storageInfo=lv=-57;cid=testClusterID;nsid=523539292;c=1731251726743), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-10T15:15:28,660 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcab209aba4a35b2b with lease ID 0x10aca27865de4267: Processing first storage report for DS-c15105e9-43a1-4809-aa0d-1c836df2d0d1 from datanode DatanodeRegistration(127.0.0.1:43119, datanodeUuid=327d567e-824f-4be2-b360-cac504a36acf, infoPort=44377, infoSecurePort=0, ipcPort=34615, storageInfo=lv=-57;cid=testClusterID;nsid=523539292;c=1731251726743) 2024-11-10T15:15:28,660 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcab209aba4a35b2b with lease ID 0x10aca27865de4267: from storage DS-c15105e9-43a1-4809-aa0d-1c836df2d0d1 node DatanodeRegistration(127.0.0.1:43119, datanodeUuid=327d567e-824f-4be2-b360-cac504a36acf, infoPort=44377, infoSecurePort=0, ipcPort=34615, storageInfo=lv=-57;cid=testClusterID;nsid=523539292;c=1731251726743), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T15:15:28,889 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0 2024-11-10T15:15:28,976 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... using builtin-java codec where applicable 2024-11-10T15:15:29,038 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=155, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=32, ProcessCount=11, AvailableMemoryMB=9522 2024-11-10T15:15:29,041 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-10T15:15:29,052 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-11-10T15:15:29,131 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/cluster_32013011-b160-e423-334f-0f3046a2abc0/zookeeper_0, clientPort=59868, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/cluster_32013011-b160-e423-334f-0f3046a2abc0/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/cluster_32013011-b160-e423-334f-0f3046a2abc0/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-10T15:15:29,141 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59868 2024-11-10T15:15:29,151 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:15:29,154 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:15:29,258 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:15:29,259 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-10T15:15:29,305 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2475908_22 at /127.0.0.1:49704 [Receiving block BP-100759265-172.17.0.2-1731251726743:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:42089:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49704 dst: /127.0.0.1:42089
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-10T15:15:29,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42089 is added to blk_-9223372036854775792_1002 (size=7)
2024-11-10T15:15:29,723 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-10T15:15:29,732 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9 with version=8 2024-11-10T15:15:29,732 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/hbase-staging 2024-11-10T15:15:29,834 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-10T15:15:30,083 INFO [Time-limited test {}] client.ConnectionUtils(128): master/b1c88e26310d:0 server-side Connection retries=45 2024-11-10T15:15:30,093 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:15:30,094 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T15:15:30,098 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T15:15:30,098 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:15:30,099 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T15:15:30,239 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-10T15:15:30,301 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-10T15:15:30,310 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-10T15:15:30,314 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T15:15:30,342 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 28821 (auto-detected) 2024-11-10T15:15:30,343 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-10T15:15:30,362 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45947 2024-11-10T15:15:30,385 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45947 connecting to ZooKeeper ensemble=127.0.0.1:59868 2024-11-10T15:15:30,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:459470x0, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T15:15:30,420 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45947-0x1010272dd660000 connected 2024-11-10T15:15:30,449 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:15:30,453 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:15:30,465 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:15:30,470 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9, hbase.cluster.distributed=false 2024-11-10T15:15:30,498 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T15:15:30,504 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45947 2024-11-10T15:15:30,505 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45947 2024-11-10T15:15:30,505 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45947 2024-11-10T15:15:30,506 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45947 2024-11-10T15:15:30,506 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45947 2024-11-10T15:15:30,618 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b1c88e26310d:0 server-side Connection retries=45 2024-11-10T15:15:30,620 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:15:30,620 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T15:15:30,620 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T15:15:30,621 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:15:30,621 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T15:15:30,624 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T15:15:30,626 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T15:15:30,627 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43733 2024-11-10T15:15:30,629 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43733 connecting to ZooKeeper ensemble=127.0.0.1:59868 2024-11-10T15:15:30,630 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:15:30,634 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:15:30,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:437330x0, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T15:15:30,643 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:437330x0, quorum=127.0.0.1:59868, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:15:30,643 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43733-0x1010272dd660001 connected 2024-11-10T15:15:30,648 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T15:15:30,656 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T15:15:30,659 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43733-0x1010272dd660001, quorum=127.0.0.1:59868, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T15:15:30,664 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43733-0x1010272dd660001, quorum=127.0.0.1:59868, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T15:15:30,665 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43733 2024-11-10T15:15:30,665 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43733 2024-11-10T15:15:30,666 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43733 2024-11-10T15:15:30,667 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43733 2024-11-10T15:15:30,667 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43733 2024-11-10T15:15:30,684 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b1c88e26310d:0 server-side Connection retries=45 2024-11-10T15:15:30,684 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:15:30,685 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T15:15:30,685 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T15:15:30,685 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:15:30,686 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T15:15:30,686 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T15:15:30,686 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T15:15:30,687 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38035 2024-11-10T15:15:30,690 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38035 connecting to ZooKeeper ensemble=127.0.0.1:59868 2024-11-10T15:15:30,691 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:15:30,696 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:15:30,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:380350x0, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T15:15:30,705 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38035-0x1010272dd660002 connected 2024-11-10T15:15:30,705 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38035-0x1010272dd660002, quorum=127.0.0.1:59868, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:15:30,706 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T15:15:30,707 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T15:15:30,708 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38035-0x1010272dd660002, quorum=127.0.0.1:59868, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T15:15:30,710 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38035-0x1010272dd660002, quorum=127.0.0.1:59868, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T15:15:30,712 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38035 2024-11-10T15:15:30,713 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38035 2024-11-10T15:15:30,713 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38035 2024-11-10T15:15:30,718 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38035 2024-11-10T15:15:30,718 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38035 2024-11-10T15:15:30,735 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b1c88e26310d:0 server-side Connection retries=45 2024-11-10T15:15:30,735 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:15:30,735 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T15:15:30,736 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T15:15:30,736 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:15:30,736 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T15:15:30,736 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T15:15:30,736 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T15:15:30,737 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39231 2024-11-10T15:15:30,739 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39231 connecting to ZooKeeper ensemble=127.0.0.1:59868 2024-11-10T15:15:30,740 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:15:30,742 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:15:30,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:392310x0, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T15:15:30,749 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:392310x0, quorum=127.0.0.1:59868, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:15:30,749 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39231-0x1010272dd660003 connected 2024-11-10T15:15:30,749 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T15:15:30,750 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T15:15:30,751 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39231-0x1010272dd660003, quorum=127.0.0.1:59868, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T15:15:30,753 DEBUG [Time-limited 
test {}] zookeeper.ZKUtil(113): regionserver:39231-0x1010272dd660003, quorum=127.0.0.1:59868, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T15:15:30,753 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39231 2024-11-10T15:15:30,754 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39231 2024-11-10T15:15:30,754 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39231 2024-11-10T15:15:30,755 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39231 2024-11-10T15:15:30,755 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39231 2024-11-10T15:15:30,770 DEBUG [M:0;b1c88e26310d:45947 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b1c88e26310d:45947 2024-11-10T15:15:30,770 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/b1c88e26310d,45947,1731251729890 2024-11-10T15:15:30,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39231-0x1010272dd660003, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:15:30,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43733-0x1010272dd660001, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:15:30,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38035-0x1010272dd660002, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:15:30,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:15:30,779 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/b1c88e26310d,45947,1731251729890 2024-11-10T15:15:30,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39231-0x1010272dd660003, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T15:15:30,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38035-0x1010272dd660002, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T15:15:30,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:30,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:43733-0x1010272dd660001, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T15:15:30,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39231-0x1010272dd660003, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:30,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38035-0x1010272dd660002, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:30,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43733-0x1010272dd660001, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:30,801 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-10T15:15:30,803 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b1c88e26310d,45947,1731251729890 from backup master directory 2024-11-10T15:15:30,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b1c88e26310d,45947,1731251729890 2024-11-10T15:15:30,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38035-0x1010272dd660002, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:15:30,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39231-0x1010272dd660003, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:15:30,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43733-0x1010272dd660001, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:15:30,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:15:30,806 WARN [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
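[Editor's note] The ipc.RpcExecutor records above describe FIFO call queues backed by java.util.concurrent.LinkedBlockingQueue, bounded at maxQueueLength=30 and drained by a fixed number of handler threads (handlerCount=3 for default.FPBQ.Fifo). The sketch below shows that bounded-queue-plus-handlers shape in plain java.util.concurrent; it is a generic illustration of the pattern, not HBase's RpcExecutor code, and the class name, thread naming, and sample task are made up.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

// Sketch of the shape behind "queueClass=LinkedBlockingQueue; numCallQueues=1,
// maxQueueLength=30, handlerCount=3": a bounded FIFO queue drained by fixed handlers.
public class BoundedCallQueueSketch {
  private final BlockingQueue<Runnable> callQueue;

  public BoundedCallQueueSketch(int maxQueueLength, int handlerCount, String threadPrefix) {
    this.callQueue = new LinkedBlockingQueue<>(maxQueueLength);
    for (int i = 0; i < handlerCount; i++) {
      Thread handler = new Thread(() -> {
        try {
          while (!Thread.currentThread().isInterrupted()) {
            callQueue.take().run(); // block until a call is queued, then handle it
          }
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt(); // handler shutting down
        }
      }, threadPrefix + ".handler-" + i);
      handler.setDaemon(true);
      handler.start();
    }
  }

  /** Returns false when the bounded queue is full, i.e. the call would be rejected. */
  public boolean dispatch(Runnable call) {
    return callQueue.offer(call);
  }

  public static void main(String[] args) throws InterruptedException {
    BoundedCallQueueSketch exec = new BoundedCallQueueSketch(30, 3, "default.FPBQ.Fifo");
    exec.dispatch(() -> System.out.println("handled by " + Thread.currentThread().getName()));
    Thread.sleep(200); // give a handler time to run the queued call
  }
}

Bounding the queue is the design point: once 30 calls are waiting, offer() fails fast instead of letting work pile up without limit.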
2024-11-10T15:15:30,806 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b1c88e26310d,45947,1731251729890 2024-11-10T15:15:30,808 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-10T15:15:30,810 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-10T15:15:30,876 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/hbase.id] with ID: 912af603-23e0-4ee7-8440-0ba37488408e 2024-11-10T15:15:30,876 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/.tmp/hbase.id 2024-11-10T15:15:30,883 WARN [master/b1c88e26310d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:15:30,884 WARN [master/b1c88e26310d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:15:30,886 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2475908_22 at /127.0.0.1:57194 [Receiving block BP-100759265-172.17.0.2-1731251726743:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:43119:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57194 dst: /127.0.0.1:43119 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
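[Editor's note] The util.FSUtils records around this point write hbase.id to a temporary .tmp path first and then move it to its final location, so readers never observe a partially written cluster ID file. Below is a minimal sketch of that write-then-rename pattern against the Hadoop FileSystem API; the NameNode URI and the cluster ID string are taken from the log, but the shortened paths and class name are illustrative and this is not HBase's FSUtils implementation.

import java.io.IOException;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of "write to a .tmp location, then rename into place" for a cluster ID file.
public class ClusterIdWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:33323"), conf);

    Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id"); // illustrative path
    Path dst = new Path("/user/jenkins/test-data/hbase.id");      // illustrative path

    // Write the full content to the temporary path first.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("912af603-23e0-4ee7-8440-0ba37488408e".getBytes(StandardCharsets.UTF_8));
    }
    // The rename is the commit step: only a complete file ever appears at dst.
    if (!fs.rename(tmp, dst)) {
      throw new IOException("rename failed: " + tmp + " -> " + dst);
    }
    fs.close();
  }
}

The rename-as-commit step is what the "Move the temporary cluster ID file to its target location" record below corresponds to.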
2024-11-10T15:15:30,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43119 is added to blk_-9223372036854775776_1004 (size=42) 2024-11-10T15:15:30,893 WARN [master/b1c88e26310d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-10T15:15:30,894 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/.tmp/hbase.id]:[hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/hbase.id] 2024-11-10T15:15:30,934 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:15:30,939 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-10T15:15:30,957 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 2024-11-10T15:15:30,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39231-0x1010272dd660003, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:30,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38035-0x1010272dd660002, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:30,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:30,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43733-0x1010272dd660001, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:30,973 WARN [master/b1c88e26310d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:15:30,973 WARN [master/b1c88e26310d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:15:30,976 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2475908_22 at /127.0.0.1:57212 [Receiving block BP-100759265-172.17.0.2-1731251726743:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:43119:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57212 dst: /127.0.0.1:43119 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:15:30,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43119 is added to blk_-9223372036854775760_1006 (size=196) 2024-11-10T15:15:30,982 WARN [master/b1c88e26310d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-10T15:15:30,997 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T15:15:30,999 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-10T15:15:31,005 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T15:15:31,036 WARN [master/b1c88e26310d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, 
policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:15:31,037 WARN [master/b1c88e26310d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:15:31,040 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2475908_22 at /127.0.0.1:57240 [Receiving block BP-100759265-172.17.0.2-1731251726743:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:43119:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57240 dst: /127.0.0.1:43119 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:15:31,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43119 is added to blk_-9223372036854775744_1008 (size=1189) 2024-11-10T15:15:31,046 WARN [master/b1c88e26310d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
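[Editor's note] The master:store table descriptor dumped in the surrounding MasterRegion/HRegion records (families info, proc, rs and state, each with VERSIONS, BLOOMFILTER, IN_MEMORY, BLOCKSIZE and encoding settings) corresponds to what the public HBase descriptor builders produce. A rough sketch for the 'info' family alone follows; it is illustrative, not the code the master actually runs, and it sets only a subset of the attributes shown in the log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: rebuild part of the 'master:store' descriptor printed in the log,
// using the public descriptor builders. Only the 'info' family is shown.
public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                // VERSIONS => '3'
        .setInMemory(true)                                // IN_MEMORY => 'true'
        .setBlocksize(8192)                               // BLOCKSIZE => 8 KB
        .setBloomFilterType(BloomType.ROWCOL)             // BLOOMFILTER => 'ROWCOL'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info)
        .build();
  }
}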
2024-11-10T15:15:31,064 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/MasterData/data/master/store 2024-11-10T15:15:31,079 WARN [master/b1c88e26310d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:15:31,079 WARN [master/b1c88e26310d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:15:31,082 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2475908_22 at /127.0.0.1:57138 [Receiving block BP-100759265-172.17.0.2-1731251726743:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39423:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57138 dst: /127.0.0.1:39423 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:15:31,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39423 is added to blk_-9223372036854775728_1010 (size=34) 2024-11-10T15:15:31,087 WARN [master/b1c88e26310d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-10T15:15:31,092 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-10T15:15:31,094 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T15:15:31,096 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T15:15:31,096 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T15:15:31,096 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T15:15:31,097 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T15:15:31,097 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T15:15:31,098 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
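[Editor's note] The throttle.StoreHotnessProtector record above notes that the protector is disabled and names the property that would enable it. A minimal configuration sketch follows; the limit value of 10 is an arbitrary example for illustration, not a recommended setting.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: enable StoreHotnessProtector by setting the property named in the log message.
public class HotnessProtectorConfigSketch {
  public static Configuration configure() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.region.store.parallel.put.limit", 10); // > 0 enables the protector
    return conf;
  }
}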
2024-11-10T15:15:31,099 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731251731095Disabling compacts and flushes for region at 1731251731095Disabling writes for close at 1731251731097 (+2 ms)Writing region close event to WAL at 1731251731098 (+1 ms)Closed at 1731251731098 2024-11-10T15:15:31,101 WARN [master/b1c88e26310d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/MasterData/data/master/store/.initializing 2024-11-10T15:15:31,101 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/MasterData/WALs/b1c88e26310d,45947,1731251729890 2024-11-10T15:15:31,109 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-10T15:15:31,124 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b1c88e26310d%2C45947%2C1731251729890, suffix=, logDir=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/MasterData/WALs/b1c88e26310d,45947,1731251729890, archiveDir=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/MasterData/oldWALs, maxLogs=10 2024-11-10T15:15:31,154 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/MasterData/WALs/b1c88e26310d,45947,1731251729890/b1c88e26310d%2C45947%2C1731251729890.1731251731128, exclude list is [], retry=0 2024-11-10T15:15:31,174 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:414) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:473) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:468) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:15:31,176 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39423,DS-4c118f26-e15e-43d2-b444-b1d8dc9964c7,DISK] 2024-11-10T15:15:31,176 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42089,DS-ed820ed7-b28c-49de-9cbc-56049bfbd4bf,DISK] 2024-11-10T15:15:31,176 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43119,DS-758158f8-9bf2-43ac-8f3a-ca87c04a668c,DISK] 2024-11-10T15:15:31,179 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-10T15:15:31,220 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/MasterData/WALs/b1c88e26310d,45947,1731251729890/b1c88e26310d%2C45947%2C1731251729890.1731251731128 2024-11-10T15:15:31,221 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36381:36381),(127.0.0.1/127.0.0.1:44377:44377),(127.0.0.1/127.0.0.1:35317:35317)] 2024-11-10T15:15:31,222 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-10T15:15:31,222 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T15:15:31,226 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:15:31,227 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:15:31,265 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:15:31,291 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-10T15:15:31,295 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:15:31,297 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:15:31,298 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:15:31,301 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-10T15:15:31,301 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:15:31,302 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T15:15:31,302 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:15:31,305 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-10T15:15:31,305 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:15:31,306 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T15:15:31,306 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:15:31,308 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-10T15:15:31,309 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:15:31,310 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T15:15:31,310 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:15:31,313 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:15:31,315 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:15:31,320 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:15:31,321 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:15:31,324 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-10T15:15:31,327 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:15:31,333 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T15:15:31,335 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65349944, jitterRate=-0.02620995044708252}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T15:15:31,340 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731251731239Initializing all the Stores at 1731251731241 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731251731242 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731251731242Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731251731243 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731251731243Cleaning up temporary data from old regions at 1731251731321 (+78 ms)Region opened successfully at 1731251731340 (+19 ms) 2024-11-10T15:15:31,341 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-10T15:15:31,377 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79c55a4e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b1c88e26310d/172.17.0.2:0 2024-11-10T15:15:31,411 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-10T15:15:31,423 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-10T15:15:31,423 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-10T15:15:31,426 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-10T15:15:31,427 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-10T15:15:31,432 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-10T15:15:31,432 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-10T15:15:31,459 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-10T15:15:31,468 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-10T15:15:31,470 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-10T15:15:31,473 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-10T15:15:31,474 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-10T15:15:31,477 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-10T15:15:31,479 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-10T15:15:31,482 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-10T15:15:31,483 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-10T15:15:31,485 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-10T15:15:31,486 DEBUG [master/b1c88e26310d:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-10T15:15:31,504 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-10T15:15:31,506 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-10T15:15:31,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T15:15:31,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43733-0x1010272dd660001, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T15:15:31,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38035-0x1010272dd660002, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T15:15:31,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:31,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43733-0x1010272dd660001, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:31,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38035-0x1010272dd660002, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:31,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39231-0x1010272dd660003, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T15:15:31,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39231-0x1010272dd660003, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:31,514 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=b1c88e26310d,45947,1731251729890, sessionid=0x1010272dd660000, setting cluster-up flag (Was=false) 2024-11-10T15:15:31,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38035-0x1010272dd660002, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:31,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39231-0x1010272dd660003, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:31,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43733-0x1010272dd660001, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
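[Editor's note] Earlier in the startup each region server set a watch on /hbase/running before it existed ("Set watcher on znode that does not yet exist"), and the ZKWatcher records just above show the matching NodeCreated event being delivered once the active master creates that znode. A minimal sketch of this watch-before-create pattern with the plain ZooKeeper client follows; the connect string and znode path are taken from the log, while the class name and handling are illustrative rather than HBase's ZKUtil/ZKWatcher code.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

// Sketch: register a watch on a znode that does not exist yet (/hbase/running)
// and react when it is created, mirroring the ZKUtil/ZKWatcher records in the log.
public class WatchBeforeCreateSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59868", 30_000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown(); // session established (the SyncConnected events in the log)
      }
    });
    connected.await();

    // exists() registers the watch even though the znode is absent; Stat is null in that case.
    Stat stat = zk.exists("/hbase/running", event -> {
      if (event.getType() == Watcher.Event.EventType.NodeCreated) {
        System.out.println("NodeCreated delivered for " + event.getPath());
      }
    });
    System.out.println("/hbase/running currently " + (stat == null ? "absent" : "present"));

    Thread.sleep(10_000); // keep the session alive long enough to observe the event
    zk.close();
  }
}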
2024-11-10T15:15:31,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:31,533 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-10T15:15:31,535 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b1c88e26310d,45947,1731251729890 2024-11-10T15:15:31,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38035-0x1010272dd660002, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:31,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39231-0x1010272dd660003, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:31,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43733-0x1010272dd660001, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:31,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:31,548 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-10T15:15:31,549 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b1c88e26310d,45947,1731251729890 2024-11-10T15:15:31,556 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-10T15:15:31,559 INFO [RS:2;b1c88e26310d:39231 {}] regionserver.HRegionServer(746): ClusterId : 912af603-23e0-4ee7-8440-0ba37488408e 2024-11-10T15:15:31,559 INFO [RS:1;b1c88e26310d:38035 {}] regionserver.HRegionServer(746): ClusterId : 912af603-23e0-4ee7-8440-0ba37488408e 2024-11-10T15:15:31,560 INFO [RS:0;b1c88e26310d:43733 {}] regionserver.HRegionServer(746): ClusterId : 912af603-23e0-4ee7-8440-0ba37488408e 2024-11-10T15:15:31,562 DEBUG [RS:1;b1c88e26310d:38035 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T15:15:31,562 DEBUG [RS:2;b1c88e26310d:39231 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T15:15:31,562 DEBUG [RS:0;b1c88e26310d:43733 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T15:15:31,568 DEBUG [RS:0;b1c88e26310d:43733 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T15:15:31,568 DEBUG [RS:1;b1c88e26310d:38035 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc 
initialized 2024-11-10T15:15:31,568 DEBUG [RS:2;b1c88e26310d:39231 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T15:15:31,568 DEBUG [RS:0;b1c88e26310d:43733 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T15:15:31,568 DEBUG [RS:2;b1c88e26310d:39231 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T15:15:31,568 DEBUG [RS:1;b1c88e26310d:38035 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T15:15:31,571 DEBUG [RS:0;b1c88e26310d:43733 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T15:15:31,571 DEBUG [RS:1;b1c88e26310d:38035 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T15:15:31,571 DEBUG [RS:2;b1c88e26310d:39231 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T15:15:31,572 DEBUG [RS:0;b1c88e26310d:43733 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7989072a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b1c88e26310d/172.17.0.2:0 2024-11-10T15:15:31,572 DEBUG [RS:2;b1c88e26310d:39231 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43d59ca2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b1c88e26310d/172.17.0.2:0 2024-11-10T15:15:31,572 DEBUG [RS:1;b1c88e26310d:38035 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72be0a4f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b1c88e26310d/172.17.0.2:0 2024-11-10T15:15:31,588 DEBUG [RS:0;b1c88e26310d:43733 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b1c88e26310d:43733 2024-11-10T15:15:31,588 DEBUG [RS:1;b1c88e26310d:38035 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;b1c88e26310d:38035 2024-11-10T15:15:31,591 INFO [RS:0;b1c88e26310d:43733 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T15:15:31,591 INFO [RS:1;b1c88e26310d:38035 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T15:15:31,591 INFO [RS:0;b1c88e26310d:43733 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T15:15:31,591 INFO [RS:1;b1c88e26310d:38035 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T15:15:31,592 DEBUG [RS:1;b1c88e26310d:38035 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-10T15:15:31,592 DEBUG [RS:0;b1c88e26310d:43733 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-10T15:15:31,592 DEBUG [RS:2;b1c88e26310d:39231 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;b1c88e26310d:39231 2024-11-10T15:15:31,592 INFO [RS:2;b1c88e26310d:39231 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T15:15:31,592 INFO [RS:2;b1c88e26310d:39231 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T15:15:31,593 DEBUG [RS:2;b1c88e26310d:39231 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-10T15:15:31,595 INFO [RS:0;b1c88e26310d:43733 {}] regionserver.HRegionServer(2659): reportForDuty to master=b1c88e26310d,45947,1731251729890 with port=43733, startcode=1731251730578 2024-11-10T15:15:31,595 INFO [RS:1;b1c88e26310d:38035 {}] regionserver.HRegionServer(2659): reportForDuty to master=b1c88e26310d,45947,1731251729890 with port=38035, startcode=1731251730684 2024-11-10T15:15:31,595 INFO [RS:2;b1c88e26310d:39231 {}] regionserver.HRegionServer(2659): reportForDuty to master=b1c88e26310d,45947,1731251729890 with port=39231, startcode=1731251730735 2024-11-10T15:15:31,610 DEBUG [RS:2;b1c88e26310d:39231 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T15:15:31,610 DEBUG [RS:1;b1c88e26310d:38035 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T15:15:31,611 DEBUG [RS:0;b1c88e26310d:43733 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T15:15:31,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43119 is added to blk_-9223372036854775788_1002 (size=7) 2024-11-10T15:15:31,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39423 is added to blk_-9223372036854775789_1002 (size=7) 2024-11-10T15:15:31,652 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42009, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T15:15:31,652 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59031, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T15:15:31,652 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57955, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T15:15:31,654 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-10T15:15:31,660 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45947 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-10T15:15:31,664 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-10T15:15:31,667 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45947 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-10T15:15:31,668 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45947 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-10T15:15:31,671 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-10T15:15:31,678 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b1c88e26310d,45947,1731251729890 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-10T15:15:31,686 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b1c88e26310d:0, corePoolSize=5, maxPoolSize=5 2024-11-10T15:15:31,686 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b1c88e26310d:0, corePoolSize=5, maxPoolSize=5 2024-11-10T15:15:31,687 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b1c88e26310d:0, corePoolSize=5, maxPoolSize=5 2024-11-10T15:15:31,687 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b1c88e26310d:0, corePoolSize=5, maxPoolSize=5 2024-11-10T15:15:31,687 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b1c88e26310d:0, corePoolSize=10, maxPoolSize=10 2024-11-10T15:15:31,687 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,687 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/b1c88e26310d:0, corePoolSize=2, maxPoolSize=2 2024-11-10T15:15:31,687 DEBUG [RS:2;b1c88e26310d:39231 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-10T15:15:31,687 DEBUG [RS:0;b1c88e26310d:43733 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-10T15:15:31,687 DEBUG [RS:1;b1c88e26310d:38035 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-10T15:15:31,687 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,688 WARN [RS:2;b1c88e26310d:39231 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-10T15:15:31,688 WARN [RS:1;b1c88e26310d:38035 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-10T15:15:31,688 WARN [RS:0;b1c88e26310d:43733 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
2024-11-10T15:15:31,693 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731251761693 2024-11-10T15:15:31,693 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T15:15:31,694 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-10T15:15:31,695 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-10T15:15:31,696 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-10T15:15:31,700 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-10T15:15:31,701 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:15:31,701 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-10T15:15:31,701 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-10T15:15:31,701 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-10T15:15:31,701 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-10T15:15:31,702 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore 
name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:31,706 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-10T15:15:31,707 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-10T15:15:31,708 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-10T15:15:31,710 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-10T15:15:31,711 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-10T15:15:31,711 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:15:31,711 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:15:31,714 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b1c88e26310d:0:becomeActiveMaster-HFileCleaner.large.0-1731251731712,5,FailOnTimeoutGroup] 2024-11-10T15:15:31,715 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b1c88e26310d:0:becomeActiveMaster-HFileCleaner.small.0-1731251731714,5,FailOnTimeoutGroup] 2024-11-10T15:15:31,715 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:31,715 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-10T15:15:31,716 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2475908_22 at /127.0.0.1:57262 [Receiving block BP-100759265-172.17.0.2-1731251726743:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:43119:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57262 dst: /127.0.0.1:43119 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:15:31,717 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:31,717 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:31,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43119 is added to blk_-9223372036854775712_1013 (size=1321) 2024-11-10T15:15:31,726 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-10T15:15:31,727 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-10T15:15:31,728 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9 2024-11-10T15:15:31,734 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. 
There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:15:31,734 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:15:31,740 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2475908_22 at /127.0.0.1:50858 [Receiving block BP-100759265-172.17.0.2-1731251726743:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:42089:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50858 dst: /127.0.0.1:42089 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:15:31,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42089 is added to blk_-9223372036854775696_1015 (size=32) 2024-11-10T15:15:31,747 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-10T15:15:31,749 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T15:15:31,751 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T15:15:31,754 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T15:15:31,754 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:15:31,756 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:15:31,756 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T15:15:31,759 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T15:15:31,759 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:15:31,760 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:15:31,760 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T15:15:31,762 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T15:15:31,763 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:15:31,764 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:15:31,764 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T15:15:31,766 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T15:15:31,767 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:15:31,768 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:15:31,768 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T15:15:31,769 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/hbase/meta/1588230740 2024-11-10T15:15:31,770 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/hbase/meta/1588230740 2024-11-10T15:15:31,773 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal 
replay for 1588230740 2024-11-10T15:15:31,773 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T15:15:31,774 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-10T15:15:31,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T15:15:31,785 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T15:15:31,786 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65420043, jitterRate=-0.025165393948554993}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T15:15:31,789 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731251731749Initializing all the Stores at 1731251731751 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731251731751Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731251731751Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731251731751Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731251731751Cleaning up temporary data from old regions at 1731251731773 (+22 ms)Region opened successfully at 1731251731788 (+15 ms) 2024-11-10T15:15:31,789 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T15:15:31,789 INFO [RS:1;b1c88e26310d:38035 {}] regionserver.HRegionServer(2659): reportForDuty to master=b1c88e26310d,45947,1731251729890 with port=38035, startcode=1731251730684 2024-11-10T15:15:31,789 INFO [RS:0;b1c88e26310d:43733 {}] regionserver.HRegionServer(2659): reportForDuty to master=b1c88e26310d,45947,1731251729890 with port=43733, startcode=1731251730578 2024-11-10T15:15:31,789 INFO [RS:2;b1c88e26310d:39231 {}] regionserver.HRegionServer(2659): reportForDuty to 
master=b1c88e26310d,45947,1731251729890 with port=39231, startcode=1731251730735 2024-11-10T15:15:31,789 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T15:15:31,789 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T15:15:31,789 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T15:15:31,789 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T15:15:31,791 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45947 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b1c88e26310d,38035,1731251730684 2024-11-10T15:15:31,793 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T15:15:31,793 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731251731789Disabling compacts and flushes for region at 1731251731789Disabling writes for close at 1731251731789Writing region close event to WAL at 1731251731792 (+3 ms)Closed at 1731251731793 (+1 ms) 2024-11-10T15:15:31,794 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45947 {}] master.ServerManager(517): Registering regionserver=b1c88e26310d,38035,1731251730684 2024-11-10T15:15:31,797 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T15:15:31,797 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-10T15:15:31,804 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45947 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b1c88e26310d,39231,1731251730735 2024-11-10T15:15:31,804 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45947 {}] master.ServerManager(517): Registering regionserver=b1c88e26310d,39231,1731251730735 2024-11-10T15:15:31,804 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-10T15:15:31,804 DEBUG [RS:1;b1c88e26310d:38035 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9 2024-11-10T15:15:31,804 DEBUG [RS:1;b1c88e26310d:38035 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33323 2024-11-10T15:15:31,804 DEBUG [RS:1;b1c88e26310d:38035 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T15:15:31,807 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45947 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b1c88e26310d,43733,1731251730578 2024-11-10T15:15:31,807 DEBUG [RS:2;b1c88e26310d:39231 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9 2024-11-10T15:15:31,807 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45947 {}] master.ServerManager(517): Registering regionserver=b1c88e26310d,43733,1731251730578 2024-11-10T15:15:31,808 DEBUG [RS:2;b1c88e26310d:39231 {}] 
regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33323 2024-11-10T15:15:31,808 DEBUG [RS:2;b1c88e26310d:39231 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T15:15:31,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T15:15:31,811 DEBUG [RS:0;b1c88e26310d:43733 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9 2024-11-10T15:15:31,811 DEBUG [RS:0;b1c88e26310d:43733 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33323 2024-11-10T15:15:31,811 DEBUG [RS:0;b1c88e26310d:43733 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T15:15:31,816 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T15:15:31,819 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-10T15:15:31,823 DEBUG [RS:1;b1c88e26310d:38035 {}] zookeeper.ZKUtil(111): regionserver:38035-0x1010272dd660002, quorum=127.0.0.1:59868, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b1c88e26310d,38035,1731251730684 2024-11-10T15:15:31,823 WARN [RS:1;b1c88e26310d:38035 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-10T15:15:31,823 INFO [RS:1;b1c88e26310d:38035 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T15:15:31,823 DEBUG [RS:1;b1c88e26310d:38035 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/WALs/b1c88e26310d,38035,1731251730684 2024-11-10T15:15:31,824 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T15:15:31,824 DEBUG [RS:2;b1c88e26310d:39231 {}] zookeeper.ZKUtil(111): regionserver:39231-0x1010272dd660003, quorum=127.0.0.1:59868, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b1c88e26310d,39231,1731251730735 2024-11-10T15:15:31,824 WARN [RS:2;b1c88e26310d:39231 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-10T15:15:31,824 INFO [RS:2;b1c88e26310d:39231 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T15:15:31,825 DEBUG [RS:2;b1c88e26310d:39231 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/WALs/b1c88e26310d,39231,1731251730735 2024-11-10T15:15:31,825 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b1c88e26310d,39231,1731251730735] 2024-11-10T15:15:31,825 DEBUG [RS:0;b1c88e26310d:43733 {}] zookeeper.ZKUtil(111): regionserver:43733-0x1010272dd660001, quorum=127.0.0.1:59868, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b1c88e26310d,43733,1731251730578 2024-11-10T15:15:31,825 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b1c88e26310d,38035,1731251730684] 2024-11-10T15:15:31,825 WARN [RS:0;b1c88e26310d:43733 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-10T15:15:31,825 INFO [RS:0;b1c88e26310d:43733 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T15:15:31,825 DEBUG [RS:0;b1c88e26310d:43733 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/WALs/b1c88e26310d,43733,1731251730578 2024-11-10T15:15:31,826 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b1c88e26310d,43733,1731251730578] 2024-11-10T15:15:31,855 INFO [RS:2;b1c88e26310d:39231 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T15:15:31,855 INFO [RS:0;b1c88e26310d:43733 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T15:15:31,855 INFO [RS:1;b1c88e26310d:38035 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T15:15:31,869 INFO [RS:0;b1c88e26310d:43733 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T15:15:31,869 INFO [RS:2;b1c88e26310d:39231 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T15:15:31,869 INFO [RS:1;b1c88e26310d:38035 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T15:15:31,875 INFO [RS:1;b1c88e26310d:38035 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T15:15:31,875 INFO [RS:1;b1c88e26310d:38035 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-10T15:15:31,875 INFO [RS:0;b1c88e26310d:43733 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T15:15:31,875 INFO [RS:2;b1c88e26310d:39231 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T15:15:31,875 INFO [RS:2;b1c88e26310d:39231 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:31,875 INFO [RS:0;b1c88e26310d:43733 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:31,876 INFO [RS:1;b1c88e26310d:38035 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T15:15:31,877 INFO [RS:0;b1c88e26310d:43733 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T15:15:31,879 INFO [RS:2;b1c88e26310d:39231 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T15:15:31,882 INFO [RS:0;b1c88e26310d:43733 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T15:15:31,882 INFO [RS:1;b1c88e26310d:38035 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T15:15:31,882 INFO [RS:2;b1c88e26310d:39231 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T15:15:31,883 INFO [RS:0;b1c88e26310d:43733 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:31,883 INFO [RS:2;b1c88e26310d:39231 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:31,883 INFO [RS:1;b1c88e26310d:38035 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-10T15:15:31,884 DEBUG [RS:2;b1c88e26310d:39231 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,884 DEBUG [RS:1;b1c88e26310d:38035 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,884 DEBUG [RS:1;b1c88e26310d:38035 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,884 DEBUG [RS:2;b1c88e26310d:39231 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,884 DEBUG [RS:0;b1c88e26310d:43733 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,884 DEBUG [RS:2;b1c88e26310d:39231 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,884 DEBUG [RS:1;b1c88e26310d:38035 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,884 DEBUG [RS:0;b1c88e26310d:43733 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,884 DEBUG [RS:0;b1c88e26310d:43733 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,884 DEBUG [RS:2;b1c88e26310d:39231 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,884 DEBUG [RS:1;b1c88e26310d:38035 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,884 DEBUG [RS:1;b1c88e26310d:38035 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,884 DEBUG [RS:0;b1c88e26310d:43733 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,884 DEBUG [RS:2;b1c88e26310d:39231 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,884 DEBUG [RS:1;b1c88e26310d:38035 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b1c88e26310d:0, corePoolSize=2, maxPoolSize=2 2024-11-10T15:15:31,885 DEBUG [RS:0;b1c88e26310d:43733 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,885 DEBUG [RS:2;b1c88e26310d:39231 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b1c88e26310d:0, corePoolSize=2, maxPoolSize=2 2024-11-10T15:15:31,885 DEBUG [RS:1;b1c88e26310d:38035 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 
2024-11-10T15:15:31,885 DEBUG [RS:2;b1c88e26310d:39231 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,885 DEBUG [RS:0;b1c88e26310d:43733 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b1c88e26310d:0, corePoolSize=2, maxPoolSize=2 2024-11-10T15:15:31,885 DEBUG [RS:1;b1c88e26310d:38035 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,885 DEBUG [RS:1;b1c88e26310d:38035 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,885 DEBUG [RS:2;b1c88e26310d:39231 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,885 DEBUG [RS:0;b1c88e26310d:43733 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,885 DEBUG [RS:1;b1c88e26310d:38035 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,885 DEBUG [RS:0;b1c88e26310d:43733 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,885 DEBUG [RS:2;b1c88e26310d:39231 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,885 DEBUG [RS:1;b1c88e26310d:38035 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,885 DEBUG [RS:1;b1c88e26310d:38035 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,885 DEBUG [RS:2;b1c88e26310d:39231 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,885 DEBUG [RS:0;b1c88e26310d:43733 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,885 DEBUG [RS:1;b1c88e26310d:38035 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b1c88e26310d:0, corePoolSize=3, maxPoolSize=3 2024-11-10T15:15:31,885 DEBUG [RS:0;b1c88e26310d:43733 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,885 DEBUG [RS:2;b1c88e26310d:39231 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,885 DEBUG [RS:1;b1c88e26310d:38035 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0, corePoolSize=3, maxPoolSize=3 2024-11-10T15:15:31,885 DEBUG [RS:2;b1c88e26310d:39231 {}] executor.ExecutorService(95): 
Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,885 DEBUG [RS:0;b1c88e26310d:43733 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,886 DEBUG [RS:0;b1c88e26310d:43733 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:31,886 DEBUG [RS:2;b1c88e26310d:39231 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b1c88e26310d:0, corePoolSize=3, maxPoolSize=3 2024-11-10T15:15:31,886 DEBUG [RS:2;b1c88e26310d:39231 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0, corePoolSize=3, maxPoolSize=3 2024-11-10T15:15:31,886 DEBUG [RS:0;b1c88e26310d:43733 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b1c88e26310d:0, corePoolSize=3, maxPoolSize=3 2024-11-10T15:15:31,886 DEBUG [RS:0;b1c88e26310d:43733 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0, corePoolSize=3, maxPoolSize=3 2024-11-10T15:15:31,894 INFO [RS:1;b1c88e26310d:38035 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:31,894 INFO [RS:2;b1c88e26310d:39231 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:31,894 INFO [RS:1;b1c88e26310d:38035 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:31,894 INFO [RS:0;b1c88e26310d:43733 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:31,894 INFO [RS:2;b1c88e26310d:39231 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:31,894 INFO [RS:1;b1c88e26310d:38035 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:31,894 INFO [RS:0;b1c88e26310d:43733 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:31,894 INFO [RS:2;b1c88e26310d:39231 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:31,895 INFO [RS:1;b1c88e26310d:38035 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:31,895 INFO [RS:2;b1c88e26310d:39231 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:31,895 INFO [RS:0;b1c88e26310d:43733 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:31,895 INFO [RS:1;b1c88e26310d:38035 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 
2024-11-10T15:15:31,895 INFO [RS:2;b1c88e26310d:39231 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:31,895 INFO [RS:0;b1c88e26310d:43733 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:31,895 INFO [RS:0;b1c88e26310d:43733 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:31,895 INFO [RS:2;b1c88e26310d:39231 {}] hbase.ChoreService(168): Chore ScheduledChore name=b1c88e26310d,39231,1731251730735-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T15:15:31,895 INFO [RS:1;b1c88e26310d:38035 {}] hbase.ChoreService(168): Chore ScheduledChore name=b1c88e26310d,38035,1731251730684-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T15:15:31,895 INFO [RS:0;b1c88e26310d:43733 {}] hbase.ChoreService(168): Chore ScheduledChore name=b1c88e26310d,43733,1731251730578-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T15:15:31,916 INFO [RS:1;b1c88e26310d:38035 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T15:15:31,917 INFO [RS:0;b1c88e26310d:43733 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T15:15:31,918 INFO [RS:0;b1c88e26310d:43733 {}] hbase.ChoreService(168): Chore ScheduledChore name=b1c88e26310d,43733,1731251730578-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:31,918 INFO [RS:1;b1c88e26310d:38035 {}] hbase.ChoreService(168): Chore ScheduledChore name=b1c88e26310d,38035,1731251730684-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:31,918 INFO [RS:2;b1c88e26310d:39231 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T15:15:31,919 INFO [RS:2;b1c88e26310d:39231 {}] hbase.ChoreService(168): Chore ScheduledChore name=b1c88e26310d,39231,1731251730735-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:31,919 INFO [RS:1;b1c88e26310d:38035 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:31,919 INFO [RS:0;b1c88e26310d:43733 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:31,919 INFO [RS:0;b1c88e26310d:43733 {}] regionserver.Replication(171): b1c88e26310d,43733,1731251730578 started 2024-11-10T15:15:31,919 INFO [RS:1;b1c88e26310d:38035 {}] regionserver.Replication(171): b1c88e26310d,38035,1731251730684 started 2024-11-10T15:15:31,919 INFO [RS:2;b1c88e26310d:39231 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:31,919 INFO [RS:2;b1c88e26310d:39231 {}] regionserver.Replication(171): b1c88e26310d,39231,1731251730735 started 2024-11-10T15:15:31,940 INFO [RS:0;b1c88e26310d:43733 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:31,940 INFO [RS:2;b1c88e26310d:39231 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-10T15:15:31,940 INFO [RS:2;b1c88e26310d:39231 {}] regionserver.HRegionServer(1482): Serving as b1c88e26310d,39231,1731251730735, RpcServer on b1c88e26310d/172.17.0.2:39231, sessionid=0x1010272dd660003 2024-11-10T15:15:31,940 INFO [RS:0;b1c88e26310d:43733 {}] regionserver.HRegionServer(1482): Serving as b1c88e26310d,43733,1731251730578, RpcServer on b1c88e26310d/172.17.0.2:43733, sessionid=0x1010272dd660001 2024-11-10T15:15:31,942 DEBUG [RS:2;b1c88e26310d:39231 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T15:15:31,942 DEBUG [RS:0;b1c88e26310d:43733 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T15:15:31,942 DEBUG [RS:2;b1c88e26310d:39231 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b1c88e26310d,39231,1731251730735 2024-11-10T15:15:31,942 DEBUG [RS:0;b1c88e26310d:43733 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b1c88e26310d,43733,1731251730578 2024-11-10T15:15:31,942 DEBUG [RS:2;b1c88e26310d:39231 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b1c88e26310d,39231,1731251730735' 2024-11-10T15:15:31,942 DEBUG [RS:0;b1c88e26310d:43733 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b1c88e26310d,43733,1731251730578' 2024-11-10T15:15:31,942 DEBUG [RS:2;b1c88e26310d:39231 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T15:15:31,942 DEBUG [RS:0;b1c88e26310d:43733 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T15:15:31,943 DEBUG [RS:0;b1c88e26310d:43733 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T15:15:31,943 DEBUG [RS:2;b1c88e26310d:39231 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T15:15:31,944 DEBUG [RS:0;b1c88e26310d:43733 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T15:15:31,944 DEBUG [RS:2;b1c88e26310d:39231 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T15:15:31,944 DEBUG [RS:0;b1c88e26310d:43733 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T15:15:31,944 INFO [RS:1;b1c88e26310d:38035 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-10T15:15:31,944 DEBUG [RS:2;b1c88e26310d:39231 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T15:15:31,944 DEBUG [RS:0;b1c88e26310d:43733 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b1c88e26310d,43733,1731251730578 2024-11-10T15:15:31,944 INFO [RS:1;b1c88e26310d:38035 {}] regionserver.HRegionServer(1482): Serving as b1c88e26310d,38035,1731251730684, RpcServer on b1c88e26310d/172.17.0.2:38035, sessionid=0x1010272dd660002 2024-11-10T15:15:31,944 DEBUG [RS:2;b1c88e26310d:39231 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b1c88e26310d,39231,1731251730735 2024-11-10T15:15:31,944 DEBUG [RS:0;b1c88e26310d:43733 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b1c88e26310d,43733,1731251730578' 2024-11-10T15:15:31,944 DEBUG [RS:2;b1c88e26310d:39231 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b1c88e26310d,39231,1731251730735' 2024-11-10T15:15:31,944 DEBUG [RS:0;b1c88e26310d:43733 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T15:15:31,944 DEBUG [RS:2;b1c88e26310d:39231 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T15:15:31,944 DEBUG [RS:1;b1c88e26310d:38035 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T15:15:31,945 DEBUG [RS:1;b1c88e26310d:38035 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b1c88e26310d,38035,1731251730684 2024-11-10T15:15:31,945 DEBUG [RS:1;b1c88e26310d:38035 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b1c88e26310d,38035,1731251730684' 2024-11-10T15:15:31,945 DEBUG [RS:1;b1c88e26310d:38035 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T15:15:31,945 DEBUG [RS:0;b1c88e26310d:43733 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T15:15:31,945 DEBUG [RS:2;b1c88e26310d:39231 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T15:15:31,946 DEBUG [RS:1;b1c88e26310d:38035 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T15:15:31,946 DEBUG [RS:0;b1c88e26310d:43733 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T15:15:31,946 INFO [RS:0;b1c88e26310d:43733 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T15:15:31,946 DEBUG [RS:2;b1c88e26310d:39231 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T15:15:31,946 INFO [RS:0;b1c88e26310d:43733 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-10T15:15:31,946 INFO [RS:2;b1c88e26310d:39231 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T15:15:31,946 INFO [RS:2;b1c88e26310d:39231 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-10T15:15:31,947 DEBUG [RS:1;b1c88e26310d:38035 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T15:15:31,947 DEBUG [RS:1;b1c88e26310d:38035 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T15:15:31,947 DEBUG [RS:1;b1c88e26310d:38035 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b1c88e26310d,38035,1731251730684 2024-11-10T15:15:31,947 DEBUG [RS:1;b1c88e26310d:38035 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b1c88e26310d,38035,1731251730684' 2024-11-10T15:15:31,947 DEBUG [RS:1;b1c88e26310d:38035 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T15:15:31,948 DEBUG [RS:1;b1c88e26310d:38035 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T15:15:31,949 DEBUG [RS:1;b1c88e26310d:38035 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T15:15:31,949 INFO [RS:1;b1c88e26310d:38035 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T15:15:31,949 INFO [RS:1;b1c88e26310d:38035 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-10T15:15:31,970 WARN [b1c88e26310d:45947 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-10T15:15:32,052 INFO [RS:2;b1c88e26310d:39231 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-10T15:15:32,052 INFO [RS:1;b1c88e26310d:38035 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-10T15:15:32,052 INFO [RS:0;b1c88e26310d:43733 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-10T15:15:32,056 INFO [RS:1;b1c88e26310d:38035 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b1c88e26310d%2C38035%2C1731251730684, suffix=, logDir=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/WALs/b1c88e26310d,38035,1731251730684, archiveDir=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/oldWALs, maxLogs=32 2024-11-10T15:15:32,056 INFO [RS:2;b1c88e26310d:39231 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b1c88e26310d%2C39231%2C1731251730735, suffix=, logDir=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/WALs/b1c88e26310d,39231,1731251730735, archiveDir=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/oldWALs, maxLogs=32 2024-11-10T15:15:32,056 INFO [RS:0;b1c88e26310d:43733 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b1c88e26310d%2C43733%2C1731251730578, suffix=, logDir=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/WALs/b1c88e26310d,43733,1731251730578, archiveDir=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/oldWALs, maxLogs=32 2024-11-10T15:15:32,078 DEBUG [RS:0;b1c88e26310d:43733 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/WALs/b1c88e26310d,43733,1731251730578/b1c88e26310d%2C43733%2C1731251730578.1731251732062, exclude list is [], retry=0 2024-11-10T15:15:32,084 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43119,DS-758158f8-9bf2-43ac-8f3a-ca87c04a668c,DISK] 2024-11-10T15:15:32,084 DEBUG [RS:1;b1c88e26310d:38035 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/WALs/b1c88e26310d,38035,1731251730684/b1c88e26310d%2C38035%2C1731251730684.1731251732063, exclude list is [], retry=0 2024-11-10T15:15:32,084 DEBUG [RS:2;b1c88e26310d:39231 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/WALs/b1c88e26310d,39231,1731251730735/b1c88e26310d%2C39231%2C1731251730735.1731251732063, exclude list is [], retry=0 2024-11-10T15:15:32,084 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42089,DS-ed820ed7-b28c-49de-9cbc-56049bfbd4bf,DISK] 2024-11-10T15:15:32,084 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39423,DS-4c118f26-e15e-43d2-b444-b1d8dc9964c7,DISK] 2024-11-10T15:15:32,134 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39423,DS-4c118f26-e15e-43d2-b444-b1d8dc9964c7,DISK] 2024-11-10T15:15:32,134 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39423,DS-4c118f26-e15e-43d2-b444-b1d8dc9964c7,DISK] 2024-11-10T15:15:32,135 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43119,DS-758158f8-9bf2-43ac-8f3a-ca87c04a668c,DISK] 2024-11-10T15:15:32,135 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42089,DS-ed820ed7-b28c-49de-9cbc-56049bfbd4bf,DISK] 2024-11-10T15:15:32,136 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42089,DS-ed820ed7-b28c-49de-9cbc-56049bfbd4bf,DISK] 2024-11-10T15:15:32,136 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43119,DS-758158f8-9bf2-43ac-8f3a-ca87c04a668c,DISK] 2024-11-10T15:15:32,140 
INFO [RS:0;b1c88e26310d:43733 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/WALs/b1c88e26310d,43733,1731251730578/b1c88e26310d%2C43733%2C1731251730578.1731251732062 2024-11-10T15:15:32,141 DEBUG [RS:0;b1c88e26310d:43733 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44377:44377),(127.0.0.1/127.0.0.1:35317:35317),(127.0.0.1/127.0.0.1:36381:36381)] 2024-11-10T15:15:32,143 INFO [RS:2;b1c88e26310d:39231 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/WALs/b1c88e26310d,39231,1731251730735/b1c88e26310d%2C39231%2C1731251730735.1731251732063 2024-11-10T15:15:32,143 DEBUG [RS:2;b1c88e26310d:39231 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35317:35317),(127.0.0.1/127.0.0.1:44377:44377),(127.0.0.1/127.0.0.1:36381:36381)] 2024-11-10T15:15:32,145 INFO [RS:1;b1c88e26310d:38035 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/WALs/b1c88e26310d,38035,1731251730684/b1c88e26310d%2C38035%2C1731251730684.1731251732063 2024-11-10T15:15:32,146 DEBUG [RS:1;b1c88e26310d:38035 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36381:36381),(127.0.0.1/127.0.0.1:35317:35317),(127.0.0.1/127.0.0.1:44377:44377)] 2024-11-10T15:15:32,223 DEBUG [b1c88e26310d:45947 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-10T15:15:32,231 DEBUG [b1c88e26310d:45947 {}] balancer.BalancerClusterState(204): Hosts are {b1c88e26310d=0} racks are {/default-rack=0} 2024-11-10T15:15:32,238 DEBUG [b1c88e26310d:45947 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-10T15:15:32,238 DEBUG [b1c88e26310d:45947 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-10T15:15:32,238 DEBUG [b1c88e26310d:45947 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-10T15:15:32,238 DEBUG [b1c88e26310d:45947 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-10T15:15:32,238 DEBUG [b1c88e26310d:45947 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-10T15:15:32,238 DEBUG [b1c88e26310d:45947 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-10T15:15:32,238 INFO [b1c88e26310d:45947 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-10T15:15:32,238 INFO [b1c88e26310d:45947 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-10T15:15:32,238 INFO [b1c88e26310d:45947 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-10T15:15:32,238 DEBUG [b1c88e26310d:45947 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-10T15:15:32,246 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b1c88e26310d,39231,1731251730735 2024-11-10T15:15:32,253 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b1c88e26310d,39231,1731251730735, state=OPENING 2024-11-10T15:15:32,258 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-10T15:15:32,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39231-0x1010272dd660003, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:32,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:32,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38035-0x1010272dd660002, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:32,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43733-0x1010272dd660001, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:32,261 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:15:32,261 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:15:32,261 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:15:32,261 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:15:32,263 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T15:15:32,266 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=b1c88e26310d,39231,1731251730735}] 2024-11-10T15:15:32,442 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-10T15:15:32,444 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35817, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-10T15:15:32,457 INFO [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-10T15:15:32,458 INFO [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T15:15:32,458 INFO [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-10T15:15:32,461 INFO [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b1c88e26310d%2C39231%2C1731251730735.meta, suffix=.meta, logDir=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/WALs/b1c88e26310d,39231,1731251730735, archiveDir=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/oldWALs, maxLogs=32 2024-11-10T15:15:32,478 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/WALs/b1c88e26310d,39231,1731251730735/b1c88e26310d%2C39231%2C1731251730735.meta.1731251732463.meta, exclude list is [], retry=0 2024-11-10T15:15:32,482 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39423,DS-4c118f26-e15e-43d2-b444-b1d8dc9964c7,DISK] 2024-11-10T15:15:32,483 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42089,DS-ed820ed7-b28c-49de-9cbc-56049bfbd4bf,DISK] 2024-11-10T15:15:32,483 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43119,DS-758158f8-9bf2-43ac-8f3a-ca87c04a668c,DISK] 2024-11-10T15:15:32,486 INFO [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/WALs/b1c88e26310d,39231,1731251730735/b1c88e26310d%2C39231%2C1731251730735.meta.1731251732463.meta 2024-11-10T15:15:32,486 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36381:36381),(127.0.0.1/127.0.0.1:35317:35317),(127.0.0.1/127.0.0.1:44377:44377)] 2024-11-10T15:15:32,486 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-10T15:15:32,488 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-10T15:15:32,491 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-10T15:15:32,496 INFO [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-10T15:15:32,501 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-10T15:15:32,501 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T15:15:32,502 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-10T15:15:32,502 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-10T15:15:32,505 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T15:15:32,506 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T15:15:32,506 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:15:32,507 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:15:32,507 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T15:15:32,509 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T15:15:32,509 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:15:32,510 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:15:32,510 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T15:15:32,511 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T15:15:32,512 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:15:32,512 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:15:32,513 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T15:15:32,514 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T15:15:32,514 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:15:32,515 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-10T15:15:32,515 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T15:15:32,516 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/hbase/meta/1588230740 2024-11-10T15:15:32,518 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/hbase/meta/1588230740 2024-11-10T15:15:32,521 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T15:15:32,521 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T15:15:32,522 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-10T15:15:32,525 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T15:15:32,526 INFO [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71266521, jitterRate=0.061953917145729065}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T15:15:32,527 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-10T15:15:32,528 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731251732502Writing region info on filesystem at 1731251732502Initializing all the Stores at 1731251732504 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731251732504Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731251732504Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731251732504Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731251732504Cleaning up temporary data from old regions at 1731251732521 (+17 ms)Running coprocessor post-open hooks at 1731251732527 (+6 ms)Region opened successfully at 1731251732528 (+1 ms) 2024-11-10T15:15:32,536 INFO [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731251732433 2024-11-10T15:15:32,547 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-10T15:15:32,548 INFO [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-10T15:15:32,549 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=b1c88e26310d,39231,1731251730735 2024-11-10T15:15:32,551 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b1c88e26310d,39231,1731251730735, state=OPEN 2024-11-10T15:15:32,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43733-0x1010272dd660001, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T15:15:32,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39231-0x1010272dd660003, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T15:15:32,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38035-0x1010272dd660002, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T15:15:32,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T15:15:32,555 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:15:32,555 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:15:32,555 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:15:32,555 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:15:32,555 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=b1c88e26310d,39231,1731251730735 2024-11-10T15:15:32,561 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-10T15:15:32,561 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=b1c88e26310d,39231,1731251730735 in 291 msec 2024-11-10T15:15:32,567 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-10T15:15:32,567 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 758 msec 2024-11-10T15:15:32,568 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T15:15:32,568 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-10T15:15:32,591 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T15:15:32,593 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b1c88e26310d,39231,1731251730735, seqNum=-1] 2024-11-10T15:15:32,622 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T15:15:32,624 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48553, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T15:15:32,678 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0510 sec 2024-11-10T15:15:32,678 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731251732678, completionTime=-1 2024-11-10T15:15:32,683 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-10T15:15:32,683 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 
2024-11-10T15:15:32,715 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-10T15:15:32,715 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731251792715 2024-11-10T15:15:32,715 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731251852715 2024-11-10T15:15:32,715 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 32 msec 2024-11-10T15:15:32,717 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-10T15:15:32,726 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b1c88e26310d,45947,1731251729890-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:32,726 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b1c88e26310d,45947,1731251729890-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:32,726 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b1c88e26310d,45947,1731251729890-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:32,728 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b1c88e26310d:45947, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:32,729 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:32,729 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:32,735 DEBUG [master/b1c88e26310d:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-10T15:15:32,760 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.953sec 2024-11-10T15:15:32,762 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-10T15:15:32,763 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-10T15:15:32,765 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-10T15:15:32,765 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-10T15:15:32,765 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-10T15:15:32,766 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b1c88e26310d,45947,1731251729890-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T15:15:32,767 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b1c88e26310d,45947,1731251729890-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-10T15:15:32,771 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-10T15:15:32,772 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-10T15:15:32,773 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b1c88e26310d,45947,1731251729890-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:32,789 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25e0c3f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T15:15:32,793 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-10T15:15:32,793 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-10T15:15:32,797 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request b1c88e26310d,45947,-1 for getting cluster id 2024-11-10T15:15:32,800 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-10T15:15:32,808 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '912af603-23e0-4ee7-8440-0ba37488408e' 2024-11-10T15:15:32,811 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-10T15:15:32,811 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "912af603-23e0-4ee7-8440-0ba37488408e" 2024-11-10T15:15:32,813 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@aeeb58e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T15:15:32,813 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b1c88e26310d,45947,-1] 2024-11-10T15:15:32,816 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-10T15:15:32,817 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:15:32,819 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33854, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
2024-11-10T15:15:32,821 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f28ba97, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T15:15:32,822 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T15:15:32,829 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b1c88e26310d,39231,1731251730735, seqNum=-1] 2024-11-10T15:15:32,829 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T15:15:32,832 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52588, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T15:15:32,853 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=b1c88e26310d,45947,1731251729890 2024-11-10T15:15:32,857 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-10T15:15:32,862 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is b1c88e26310d,45947,1731251729890 2024-11-10T15:15:32,864 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@449c6f77 2024-11-10T15:15:32,865 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-10T15:15:32,867 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33860, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-10T15:15:32,873 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45947 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T15:15:32,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45947 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-10T15:15:32,882 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-10T15:15:32,884 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45947 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-10T15:15:32,885 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T15:15:32,888 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-11-10T15:15:32,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-10T15:15:32,896 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-10T15:15:32,896 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-10T15:15:32,899 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2475908_22 at /127.0.0.1:57198 [Receiving block BP-100759265-172.17.0.2-1731251726743:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:39423:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57198 dst: /127.0.0.1:39423
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-10T15:15:32,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39423 is added to blk_-9223372036854775680_1021 (size=392)
2024-11-10T15:15:32,907 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-10T15:15:32,910 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => f00b23e47ba849e123b0ce52383bc8d2, NAME => 'TestHBaseWalOnEC,,1731251732869.f00b23e47ba849e123b0ce52383bc8d2.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9
2024-11-10T15:15:32,915 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-10T15:15:32,916 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-10T15:15:32,920 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2475908_22 at /127.0.0.1:50914 [Receiving block BP-100759265-172.17.0.2-1731251726743:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:42089:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50914 dst: /127.0.0.1:42089
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-10T15:15:32,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42089 is added to blk_-9223372036854775664_1023 (size=51)
2024-11-10T15:15:32,927 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-10T15:15:32,928 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731251732869.f00b23e47ba849e123b0ce52383bc8d2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T15:15:32,928 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing f00b23e47ba849e123b0ce52383bc8d2, disabling compactions & flushes 2024-11-10T15:15:32,928 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731251732869.f00b23e47ba849e123b0ce52383bc8d2. 2024-11-10T15:15:32,928 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731251732869.f00b23e47ba849e123b0ce52383bc8d2. 2024-11-10T15:15:32,928 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731251732869.f00b23e47ba849e123b0ce52383bc8d2. after waiting 0 ms 2024-11-10T15:15:32,928 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731251732869.f00b23e47ba849e123b0ce52383bc8d2. 2024-11-10T15:15:32,928 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731251732869.f00b23e47ba849e123b0ce52383bc8d2. 2024-11-10T15:15:32,928 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for f00b23e47ba849e123b0ce52383bc8d2: Waiting for close lock at 1731251732928Disabling compacts and flushes for region at 1731251732928Disabling writes for close at 1731251732928Writing region close event to WAL at 1731251732928Closed at 1731251732928 2024-11-10T15:15:32,931 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-10T15:15:32,938 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731251732869.f00b23e47ba849e123b0ce52383bc8d2.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731251732931"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731251732931"}]},"ts":"1731251732931"} 2024-11-10T15:15:32,947 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
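For context, the descriptor logged at region creation above (table 'TestHBaseWalOnEC', a single family 'cf' with VERSIONS => '1' and otherwise default attributes) corresponds roughly to the following client-side sketch; the connection setup is assumed and only the attributes visible in the log are set explicitly:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml points at the cluster
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1) // VERSIONS => '1' as in the logged descriptor
                  .build())
              .build();
          admin.createTable(td); // drives the CreateTableProcedure seen above as pid=4
        }
      }
    }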
2024-11-10T15:15:32,949 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-10T15:15:32,952 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731251732949"}]},"ts":"1731251732949"} 2024-11-10T15:15:32,956 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-10T15:15:32,957 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {b1c88e26310d=0} racks are {/default-rack=0} 2024-11-10T15:15:32,958 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-10T15:15:32,958 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-10T15:15:32,958 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-10T15:15:32,958 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-10T15:15:32,958 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-10T15:15:32,958 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-10T15:15:32,958 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-10T15:15:32,958 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-10T15:15:32,958 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-10T15:15:32,958 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-10T15:15:32,960 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=f00b23e47ba849e123b0ce52383bc8d2, ASSIGN}] 2024-11-10T15:15:32,962 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=f00b23e47ba849e123b0ce52383bc8d2, ASSIGN 2024-11-10T15:15:32,964 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=f00b23e47ba849e123b0ce52383bc8d2, ASSIGN; state=OFFLINE, location=b1c88e26310d,38035,1731251730684; forceNewPlan=false, retain=false 2024-11-10T15:15:33,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T15:15:33,117 INFO [b1c88e26310d:45947 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-10T15:15:33,118 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f00b23e47ba849e123b0ce52383bc8d2, regionState=OPENING, regionLocation=b1c88e26310d,38035,1731251730684 2024-11-10T15:15:33,122 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=f00b23e47ba849e123b0ce52383bc8d2, ASSIGN because future has completed 2024-11-10T15:15:33,123 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f00b23e47ba849e123b0ce52383bc8d2, server=b1c88e26310d,38035,1731251730684}] 2024-11-10T15:15:33,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T15:15:33,278 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-10T15:15:33,280 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46515, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-10T15:15:33,286 INFO [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731251732869.f00b23e47ba849e123b0ce52383bc8d2. 2024-11-10T15:15:33,287 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => f00b23e47ba849e123b0ce52383bc8d2, NAME => 'TestHBaseWalOnEC,,1731251732869.f00b23e47ba849e123b0ce52383bc8d2.', STARTKEY => '', ENDKEY => ''} 2024-11-10T15:15:33,287 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC f00b23e47ba849e123b0ce52383bc8d2 2024-11-10T15:15:33,287 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731251732869.f00b23e47ba849e123b0ce52383bc8d2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T15:15:33,287 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for f00b23e47ba849e123b0ce52383bc8d2 2024-11-10T15:15:33,287 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for f00b23e47ba849e123b0ce52383bc8d2 2024-11-10T15:15:33,289 INFO [StoreOpener-f00b23e47ba849e123b0ce52383bc8d2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f00b23e47ba849e123b0ce52383bc8d2 2024-11-10T15:15:33,292 INFO [StoreOpener-f00b23e47ba849e123b0ce52383bc8d2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f00b23e47ba849e123b0ce52383bc8d2 columnFamilyName cf 2024-11-10T15:15:33,292 DEBUG [StoreOpener-f00b23e47ba849e123b0ce52383bc8d2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:15:33,293 INFO [StoreOpener-f00b23e47ba849e123b0ce52383bc8d2-1 {}] regionserver.HStore(327): Store=f00b23e47ba849e123b0ce52383bc8d2/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T15:15:33,293 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for f00b23e47ba849e123b0ce52383bc8d2 2024-11-10T15:15:33,294 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/default/TestHBaseWalOnEC/f00b23e47ba849e123b0ce52383bc8d2 2024-11-10T15:15:33,295 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/default/TestHBaseWalOnEC/f00b23e47ba849e123b0ce52383bc8d2 2024-11-10T15:15:33,296 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for f00b23e47ba849e123b0ce52383bc8d2 2024-11-10T15:15:33,296 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for f00b23e47ba849e123b0ce52383bc8d2 2024-11-10T15:15:33,298 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for f00b23e47ba849e123b0ce52383bc8d2 2024-11-10T15:15:33,303 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/default/TestHBaseWalOnEC/f00b23e47ba849e123b0ce52383bc8d2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T15:15:33,303 INFO [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened f00b23e47ba849e123b0ce52383bc8d2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64363333, jitterRate=-0.04091159999370575}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-10T15:15:33,304 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f00b23e47ba849e123b0ce52383bc8d2 2024-11-10T15:15:33,305 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for f00b23e47ba849e123b0ce52383bc8d2: Running coprocessor pre-open hook at 1731251733287Writing region info on filesystem at 1731251733287Initializing all the Stores at 1731251733289 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731251733289Cleaning up temporary data from old regions at 1731251733296 (+7 ms)Running coprocessor post-open hooks at 1731251733304 (+8 ms)Region opened successfully at 1731251733305 (+1 ms) 2024-11-10T15:15:33,307 INFO [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731251732869.f00b23e47ba849e123b0ce52383bc8d2., pid=6, masterSystemTime=1731251733277 2024-11-10T15:15:33,310 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731251732869.f00b23e47ba849e123b0ce52383bc8d2. 2024-11-10T15:15:33,310 INFO [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731251732869.f00b23e47ba849e123b0ce52383bc8d2. 2024-11-10T15:15:33,311 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f00b23e47ba849e123b0ce52383bc8d2, regionState=OPEN, openSeqNum=2, regionLocation=b1c88e26310d,38035,1731251730684 2024-11-10T15:15:33,315 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f00b23e47ba849e123b0ce52383bc8d2, server=b1c88e26310d,38035,1731251730684 because future has completed 2024-11-10T15:15:33,320 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-10T15:15:33,320 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure f00b23e47ba849e123b0ce52383bc8d2, server=b1c88e26310d,38035,1731251730684 in 194 msec 2024-11-10T15:15:33,324 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-10T15:15:33,324 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=f00b23e47ba849e123b0ce52383bc8d2, ASSIGN in 360 msec 2024-11-10T15:15:33,326 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-10T15:15:33,326 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731251733326"}]},"ts":"1731251733326"} 2024-11-10T15:15:33,329 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-10T15:15:33,331 INFO [PEWorker-1 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-10T15:15:33,334 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 456 msec 2024-11-10T15:15:33,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T15:15:33,527 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-10T15:15:33,527 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-10T15:15:33,528 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-10T15:15:33,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-10T15:15:33,534 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-10T15:15:33,535 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-11-10T15:15:33,542 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731251732869.f00b23e47ba849e123b0ce52383bc8d2., hostname=b1c88e26310d,38035,1731251730684, seqNum=2] 2024-11-10T15:15:33,544 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T15:15:33,546 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56444, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T15:15:33,554 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45947 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-11-10T15:15:33,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45947 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-10T15:15:33,561 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-10T15:15:33,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-10T15:15:33,563 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-10T15:15:33,564 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-10T15:15:33,666 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-10T15:15:33,727 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38035 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-10T15:15:33,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731251732869.f00b23e47ba849e123b0ce52383bc8d2. 2024-11-10T15:15:33,731 INFO [RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing f00b23e47ba849e123b0ce52383bc8d2 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-10T15:15:33,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/default/TestHBaseWalOnEC/f00b23e47ba849e123b0ce52383bc8d2/.tmp/cf/f42a3cc3701c40d4b5ce44ff53eca54f is 36, key is row/cf:cq/1731251733547/Put/seqid=0 2024-11-10T15:15:33,792 WARN [RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:15:33,792 WARN [RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:15:33,796 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1122505898_22 at /127.0.0.1:50928 [Receiving block BP-100759265-172.17.0.2-1731251726743:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:42089:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50928 dst: /127.0.0.1:42089 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:15:33,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42089 is added to blk_-9223372036854775648_1025 (size=4787) 2024-11-10T15:15:33,801 WARN [RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-10T15:15:33,801 INFO [RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/default/TestHBaseWalOnEC/f00b23e47ba849e123b0ce52383bc8d2/.tmp/cf/f42a3cc3701c40d4b5ce44ff53eca54f 2024-11-10T15:15:33,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/default/TestHBaseWalOnEC/f00b23e47ba849e123b0ce52383bc8d2/.tmp/cf/f42a3cc3701c40d4b5ce44ff53eca54f as hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/default/TestHBaseWalOnEC/f00b23e47ba849e123b0ce52383bc8d2/cf/f42a3cc3701c40d4b5ce44ff53eca54f 2024-11-10T15:15:33,862 INFO [RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/default/TestHBaseWalOnEC/f00b23e47ba849e123b0ce52383bc8d2/cf/f42a3cc3701c40d4b5ce44ff53eca54f, entries=1, sequenceid=5, filesize=4.7 K 2024-11-10T15:15:33,872 INFO [RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for f00b23e47ba849e123b0ce52383bc8d2 in 137ms, sequenceid=5, compaction requested=false 2024-11-10T15:15:33,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-11-10T15:15:33,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for f00b23e47ba849e123b0ce52383bc8d2: 2024-11-10T15:15:33,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731251732869.f00b23e47ba849e123b0ce52383bc8d2. 
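The flush above persists the single cell written by the test (HFileWriterImpl reports "key is row/cf:cq/...", 32 B of memstore data flushed at sequenceid=5). A put followed by an explicit table flush, which is presumably what the client issued, would look roughly like this; the table, family, and qualifier names are taken from the log, the value and connection setup are assumptions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAndFlush {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName name = TableName.valueOf("TestHBaseWalOnEC");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(name);
             Admin admin = conn.getAdmin()) {
          // Write row/cf:cq, matching the cell key reported by HFileWriterImpl above.
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
          // Triggers the FlushTableProcedure (pid=7) / FlushRegionProcedure (pid=8) path seen in the master log.
          admin.flush(name);
        }
      }
    }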
2024-11-10T15:15:33,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-10T15:15:33,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-10T15:15:33,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45947 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-10T15:15:33,885 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-10T15:15:33,885 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 318 msec 2024-11-10T15:15:33,889 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 331 msec 2024-11-10T15:15:34,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-10T15:15:34,187 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-10T15:15:34,202 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-10T15:15:34,202 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-10T15:15:34,202 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at 
org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T15:15:34,207 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:15:34,207 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:15:34,207 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
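The call stacks above show the shutdown originating from TestHBaseWalOnEC.tearDown via HBaseTestingUtil.shutdownMiniCluster. A minimal sketch of that mini-cluster lifecycle, under the assumption of a standard JUnit setup; only the utility class and the shutdownMiniCluster call appear in the log, the annotations, field name, and startMiniCluster argument are illustrative:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;

    public class MiniClusterLifecycle {
      private final HBaseTestingUtil util = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        util.startMiniCluster(3); // assumption: three region servers, matching RS:0..RS:2 in this log
      }

      @After
      public void tearDown() throws Exception {
        util.shutdownMiniCluster(); // the HBaseTestingUtil.shutdownMiniCluster call visible in the stack traces above
      }
    }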
2024-11-10T15:15:34,208 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-10T15:15:34,208 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=476640655, stopped=false 2024-11-10T15:15:34,208 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=b1c88e26310d,45947,1731251729890 2024-11-10T15:15:34,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39231-0x1010272dd660003, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T15:15:34,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43733-0x1010272dd660001, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T15:15:34,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39231-0x1010272dd660003, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:34,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43733-0x1010272dd660001, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:34,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38035-0x1010272dd660002, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T15:15:34,210 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T15:15:34,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T15:15:34,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38035-0x1010272dd660002, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:34,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:34,211 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-10T15:15:34,211 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39231-0x1010272dd660003, quorum=127.0.0.1:59868, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:15:34,211 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at 
org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T15:15:34,212 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:15:34,212 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38035-0x1010272dd660002, quorum=127.0.0.1:59868, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:15:34,212 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43733-0x1010272dd660001, quorum=127.0.0.1:59868, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:15:34,212 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:15:34,213 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b1c88e26310d,43733,1731251730578' ***** 2024-11-10T15:15:34,213 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T15:15:34,213 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b1c88e26310d,38035,1731251730684' ***** 2024-11-10T15:15:34,213 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T15:15:34,213 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b1c88e26310d,39231,1731251730735' ***** 2024-11-10T15:15:34,213 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T15:15:34,213 INFO [RS:0;b1c88e26310d:43733 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T15:15:34,213 INFO [RS:2;b1c88e26310d:39231 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T15:15:34,213 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T15:15:34,213 INFO [RS:0;b1c88e26310d:43733 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T15:15:34,213 INFO [RS:2;b1c88e26310d:39231 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T15:15:34,214 INFO [RS:0;b1c88e26310d:43733 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-10T15:15:34,214 INFO [RS:2;b1c88e26310d:39231 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-10T15:15:34,214 INFO [RS:0;b1c88e26310d:43733 {}] regionserver.HRegionServer(959): stopping server b1c88e26310d,43733,1731251730578 2024-11-10T15:15:34,214 INFO [RS:2;b1c88e26310d:39231 {}] regionserver.HRegionServer(959): stopping server b1c88e26310d,39231,1731251730735 2024-11-10T15:15:34,214 INFO [RS:0;b1c88e26310d:43733 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T15:15:34,214 INFO [RS:2;b1c88e26310d:39231 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T15:15:34,214 INFO [RS:0;b1c88e26310d:43733 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;b1c88e26310d:43733. 2024-11-10T15:15:34,214 INFO [RS:2;b1c88e26310d:39231 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;b1c88e26310d:39231. 2024-11-10T15:15:34,214 DEBUG [RS:0;b1c88e26310d:43733 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T15:15:34,214 DEBUG [RS:0;b1c88e26310d:43733 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:15:34,214 DEBUG [RS:2;b1c88e26310d:39231 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at 
org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T15:15:34,214 DEBUG [RS:2;b1c88e26310d:39231 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:15:34,214 INFO [RS:0;b1c88e26310d:43733 {}] regionserver.HRegionServer(976): stopping server b1c88e26310d,43733,1731251730578; all regions closed. 2024-11-10T15:15:34,214 INFO [RS:2;b1c88e26310d:39231 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T15:15:34,214 INFO [RS:2;b1c88e26310d:39231 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T15:15:34,214 INFO [RS:2;b1c88e26310d:39231 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-10T15:15:34,215 INFO [RS:2;b1c88e26310d:39231 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-10T15:15:34,215 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T15:15:34,215 INFO [RS:1;b1c88e26310d:38035 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T15:15:34,215 INFO [RS:1;b1c88e26310d:38035 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T15:15:34,215 INFO [RS:2;b1c88e26310d:39231 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-10T15:15:34,215 INFO [RS:1;b1c88e26310d:38035 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-10T15:15:34,216 DEBUG [RS:2;b1c88e26310d:39231 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-10T15:15:34,216 INFO [RS:1;b1c88e26310d:38035 {}] regionserver.HRegionServer(3091): Received CLOSE for f00b23e47ba849e123b0ce52383bc8d2 2024-11-10T15:15:34,216 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T15:15:34,216 DEBUG [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T15:15:34,216 DEBUG [RS:2;b1c88e26310d:39231 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-10T15:15:34,216 INFO [RS:1;b1c88e26310d:38035 {}] regionserver.HRegionServer(959): stopping server b1c88e26310d,38035,1731251730684 2024-11-10T15:15:34,216 INFO [RS:1;b1c88e26310d:38035 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T15:15:34,216 INFO [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T15:15:34,216 INFO [RS:1;b1c88e26310d:38035 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;b1c88e26310d:38035. 
2024-11-10T15:15:34,216 DEBUG [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T15:15:34,216 DEBUG [RS:1;b1c88e26310d:38035 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T15:15:34,216 DEBUG [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T15:15:34,216 DEBUG [RS:1;b1c88e26310d:38035 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:15:34,216 DEBUG [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T15:15:34,216 INFO [RS:1;b1c88e26310d:38035 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-10T15:15:34,216 DEBUG [RS_CLOSE_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing f00b23e47ba849e123b0ce52383bc8d2, disabling compactions & flushes 2024-11-10T15:15:34,216 DEBUG [RS:1;b1c88e26310d:38035 {}] regionserver.HRegionServer(1325): Online Regions={f00b23e47ba849e123b0ce52383bc8d2=TestHBaseWalOnEC,,1731251732869.f00b23e47ba849e123b0ce52383bc8d2.} 2024-11-10T15:15:34,216 INFO [RS_CLOSE_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731251732869.f00b23e47ba849e123b0ce52383bc8d2. 2024-11-10T15:15:34,217 DEBUG [RS:1;b1c88e26310d:38035 {}] regionserver.HRegionServer(1351): Waiting on f00b23e47ba849e123b0ce52383bc8d2 2024-11-10T15:15:34,217 DEBUG [RS_CLOSE_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731251732869.f00b23e47ba849e123b0ce52383bc8d2. 
2024-11-10T15:15:34,217 INFO [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-10T15:15:34,217 DEBUG [RS_CLOSE_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731251732869.f00b23e47ba849e123b0ce52383bc8d2. after waiting 0 ms 2024-11-10T15:15:34,217 DEBUG [RS_CLOSE_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731251732869.f00b23e47ba849e123b0ce52383bc8d2. 2024-11-10T15:15:34,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39423 is added to blk_1073741826_1016 (size=93) 2024-11-10T15:15:34,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42089 is added to blk_1073741826_1016 (size=93) 2024-11-10T15:15:34,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43119 is added to blk_1073741826_1016 (size=93) 2024-11-10T15:15:34,228 DEBUG [RS:0;b1c88e26310d:43733 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/oldWALs 2024-11-10T15:15:34,229 INFO [RS:0;b1c88e26310d:43733 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b1c88e26310d%2C43733%2C1731251730578:(num 1731251732062) 2024-11-10T15:15:34,229 DEBUG [RS:0;b1c88e26310d:43733 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:15:34,229 INFO [RS:0;b1c88e26310d:43733 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T15:15:34,229 INFO [RS:0;b1c88e26310d:43733 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T15:15:34,229 INFO [RS:0;b1c88e26310d:43733 {}] hbase.ChoreService(370): Chore service for: regionserver/b1c88e26310d:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-10T15:15:34,230 INFO [RS:0;b1c88e26310d:43733 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T15:15:34,230 INFO [RS:0;b1c88e26310d:43733 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T15:15:34,230 INFO [regionserver/b1c88e26310d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T15:15:34,230 INFO [RS:0;b1c88e26310d:43733 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-10T15:15:34,230 INFO [RS:0;b1c88e26310d:43733 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T15:15:34,230 INFO [RS:0;b1c88e26310d:43733 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43733 2024-11-10T15:15:34,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T15:15:34,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43733-0x1010272dd660001, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b1c88e26310d,43733,1731251730578 2024-11-10T15:15:34,234 INFO [RS:0;b1c88e26310d:43733 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T15:15:34,236 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b1c88e26310d,43733,1731251730578] 2024-11-10T15:15:34,237 DEBUG [RS_CLOSE_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/default/TestHBaseWalOnEC/f00b23e47ba849e123b0ce52383bc8d2/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-10T15:15:34,238 INFO [RS_CLOSE_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731251732869.f00b23e47ba849e123b0ce52383bc8d2. 2024-11-10T15:15:34,238 DEBUG [RS_CLOSE_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for f00b23e47ba849e123b0ce52383bc8d2: Waiting for close lock at 1731251734216Running coprocessor pre-close hooks at 1731251734216Disabling compacts and flushes for region at 1731251734216Disabling writes for close at 1731251734217 (+1 ms)Writing region close event to WAL at 1731251734222 (+5 ms)Running coprocessor post-close hooks at 1731251734238 (+16 ms)Closed at 1731251734238 2024-11-10T15:15:34,239 DEBUG [RS_CLOSE_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731251732869.f00b23e47ba849e123b0ce52383bc8d2. 2024-11-10T15:15:34,242 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b1c88e26310d,43733,1731251730578 already deleted, retry=false 2024-11-10T15:15:34,242 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b1c88e26310d,43733,1731251730578 expired; onlineServers=2 2024-11-10T15:15:34,252 DEBUG [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/hbase/meta/1588230740/.tmp/info/59a5f0dcdb09465bb0df2e427e7c3675 is 153, key is TestHBaseWalOnEC,,1731251732869.f00b23e47ba849e123b0ce52383bc8d2./info:regioninfo/1731251733311/Put/seqid=0 2024-11-10T15:15:34,256 WARN [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-10T15:15:34,256 WARN [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:15:34,261 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_580283831_22 at /127.0.0.1:57306 [Receiving block BP-100759265-172.17.0.2-1731251726743:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:43119:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57306 dst: /127.0.0.1:43119 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:15:34,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43119 is added to blk_-9223372036854775632_1027 (size=6637) 2024-11-10T15:15:34,266 WARN [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-10T15:15:34,266 INFO [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/hbase/meta/1588230740/.tmp/info/59a5f0dcdb09465bb0df2e427e7c3675 2024-11-10T15:15:34,296 DEBUG [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/hbase/meta/1588230740/.tmp/ns/76c86c7fb8114b889a1489a8e9d47c5d is 43, key is default/ns:d/1731251732629/Put/seqid=0 2024-11-10T15:15:34,298 WARN [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-10T15:15:34,298 WARN [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:15:34,298 INFO [regionserver/b1c88e26310d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T15:15:34,298 INFO [regionserver/b1c88e26310d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T15:15:34,301 INFO [regionserver/b1c88e26310d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T15:15:34,302 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_580283831_22 at /127.0.0.1:57328 [Receiving block BP-100759265-172.17.0.2-1731251726743:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:43119:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57328 dst: /127.0.0.1:43119 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:15:34,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43119 is added to blk_-9223372036854775616_1029 (size=5153) 2024-11-10T15:15:34,309 WARN [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-10T15:15:34,309 INFO [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/hbase/meta/1588230740/.tmp/ns/76c86c7fb8114b889a1489a8e9d47c5d 2024-11-10T15:15:34,336 DEBUG [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/hbase/meta/1588230740/.tmp/table/9db1ddccd36947d7a07656528cae441a is 52, key is TestHBaseWalOnEC/table:state/1731251733326/Put/seqid=0 2024-11-10T15:15:34,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43733-0x1010272dd660001, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:15:34,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43733-0x1010272dd660001, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:15:34,338 INFO [RS:0;b1c88e26310d:43733 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T15:15:34,338 INFO [RS:0;b1c88e26310d:43733 {}] regionserver.HRegionServer(1031): Exiting; stopping=b1c88e26310d,43733,1731251730578; zookeeper connection closed. 2024-11-10T15:15:34,338 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@75ba4e84 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@75ba4e84 2024-11-10T15:15:34,339 WARN [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:15:34,339 WARN [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:15:34,342 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_580283831_22 at /127.0.0.1:57340 [Receiving block BP-100759265-172.17.0.2-1731251726743:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:43119:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57340 dst: /127.0.0.1:43119 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:15:34,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43119 is added to blk_-9223372036854775600_1031 (size=5249) 2024-11-10T15:15:34,347 WARN [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-10T15:15:34,348 INFO [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/hbase/meta/1588230740/.tmp/table/9db1ddccd36947d7a07656528cae441a 2024-11-10T15:15:34,360 DEBUG [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/hbase/meta/1588230740/.tmp/info/59a5f0dcdb09465bb0df2e427e7c3675 as hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/hbase/meta/1588230740/info/59a5f0dcdb09465bb0df2e427e7c3675 2024-11-10T15:15:34,371 INFO [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/hbase/meta/1588230740/info/59a5f0dcdb09465bb0df2e427e7c3675, entries=10, sequenceid=11, filesize=6.5 K 2024-11-10T15:15:34,373 DEBUG [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/hbase/meta/1588230740/.tmp/ns/76c86c7fb8114b889a1489a8e9d47c5d as hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/hbase/meta/1588230740/ns/76c86c7fb8114b889a1489a8e9d47c5d 2024-11-10T15:15:34,382 INFO [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/hbase/meta/1588230740/ns/76c86c7fb8114b889a1489a8e9d47c5d, entries=2, sequenceid=11, filesize=5.0 K 2024-11-10T15:15:34,383 DEBUG [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/hbase/meta/1588230740/.tmp/table/9db1ddccd36947d7a07656528cae441a as hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/hbase/meta/1588230740/table/9db1ddccd36947d7a07656528cae441a 2024-11-10T15:15:34,393 INFO [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/hbase/meta/1588230740/table/9db1ddccd36947d7a07656528cae441a, entries=2, sequenceid=11, filesize=5.1 K 2024-11-10T15:15:34,395 INFO [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 179ms, sequenceid=11, compaction requested=false 2024-11-10T15:15:34,395 DEBUG [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-10T15:15:34,405 DEBUG [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-10T15:15:34,406 DEBUG [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T15:15:34,406 INFO [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T15:15:34,406 DEBUG [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731251734215Running coprocessor pre-close hooks at 1731251734216 (+1 ms)Disabling compacts and flushes for region at 1731251734216Disabling writes for close at 1731251734216Obtaining lock to block concurrent updates at 1731251734217 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731251734217Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731251734218 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731251734219 (+1 ms)Flushing 1588230740/info: creating writer at 1731251734219Flushing 1588230740/info: appending metadata at 1731251734248 (+29 ms)Flushing 1588230740/info: closing flushed file at 1731251734248Flushing 1588230740/ns: creating writer at 1731251734278 (+30 ms)Flushing 1588230740/ns: appending metadata at 1731251734295 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1731251734295Flushing 1588230740/table: creating writer at 1731251734318 (+23 ms)Flushing 1588230740/table: appending metadata at 1731251734335 (+17 ms)Flushing 1588230740/table: closing flushed file at 1731251734335Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d5ba00d: reopening flushed file at 1731251734359 (+24 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16caebf2: reopening flushed file at 1731251734372 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6cd24308: reopening flushed file at 1731251734382 (+10 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 179ms, sequenceid=11, compaction requested=false at 1731251734395 (+13 ms)Writing region close event to WAL at 1731251734398 (+3 ms)Running coprocessor post-close hooks at 1731251734406 (+8 ms)Closed at 1731251734406 2024-11-10T15:15:34,407 DEBUG [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 
{event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-10T15:15:34,416 INFO [RS:2;b1c88e26310d:39231 {}] regionserver.HRegionServer(976): stopping server b1c88e26310d,39231,1731251730735; all regions closed. 2024-11-10T15:15:34,417 INFO [RS:1;b1c88e26310d:38035 {}] regionserver.HRegionServer(976): stopping server b1c88e26310d,38035,1731251730684; all regions closed. 2024-11-10T15:15:34,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42089 is added to blk_1073741829_1019 (size=2751) 2024-11-10T15:15:34,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43119 is added to blk_1073741827_1017 (size=1298) 2024-11-10T15:15:34,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43119 is added to blk_1073741829_1019 (size=2751) 2024-11-10T15:15:34,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39423 is added to blk_1073741829_1019 (size=2751) 2024-11-10T15:15:34,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42089 is added to blk_1073741827_1017 (size=1298) 2024-11-10T15:15:34,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39423 is added to blk_1073741827_1017 (size=1298) 2024-11-10T15:15:34,425 DEBUG [RS:1;b1c88e26310d:38035 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/oldWALs 2024-11-10T15:15:34,425 INFO [RS:1;b1c88e26310d:38035 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b1c88e26310d%2C38035%2C1731251730684:(num 1731251732063) 2024-11-10T15:15:34,425 DEBUG [RS:1;b1c88e26310d:38035 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:15:34,425 DEBUG [RS:2;b1c88e26310d:39231 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/oldWALs 2024-11-10T15:15:34,425 INFO [RS:1;b1c88e26310d:38035 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T15:15:34,425 INFO [RS:2;b1c88e26310d:39231 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b1c88e26310d%2C39231%2C1731251730735.meta:.meta(num 1731251732463) 2024-11-10T15:15:34,425 INFO [RS:1;b1c88e26310d:38035 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T15:15:34,425 INFO [RS:1;b1c88e26310d:38035 {}] hbase.ChoreService(370): Chore service for: regionserver/b1c88e26310d:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-10T15:15:34,426 INFO [RS:1;b1c88e26310d:38035 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T15:15:34,426 INFO [RS:1;b1c88e26310d:38035 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T15:15:34,426 INFO [regionserver/b1c88e26310d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T15:15:34,426 INFO [RS:1;b1c88e26310d:38035 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-10T15:15:34,426 INFO [RS:1;b1c88e26310d:38035 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T15:15:34,426 INFO [RS:1;b1c88e26310d:38035 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38035 2024-11-10T15:15:34,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38035-0x1010272dd660002, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b1c88e26310d,38035,1731251730684 2024-11-10T15:15:34,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T15:15:34,428 INFO [RS:1;b1c88e26310d:38035 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T15:15:34,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42089 is added to blk_1073741828_1018 (size=93) 2024-11-10T15:15:34,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43119 is added to blk_1073741828_1018 (size=93) 2024-11-10T15:15:34,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39423 is added to blk_1073741828_1018 (size=93) 2024-11-10T15:15:34,430 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b1c88e26310d,38035,1731251730684] 2024-11-10T15:15:34,432 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b1c88e26310d,38035,1731251730684 already deleted, retry=false 2024-11-10T15:15:34,432 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b1c88e26310d,38035,1731251730684 expired; onlineServers=1 2024-11-10T15:15:34,432 DEBUG [RS:2;b1c88e26310d:39231 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/oldWALs 2024-11-10T15:15:34,433 INFO [RS:2;b1c88e26310d:39231 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b1c88e26310d%2C39231%2C1731251730735:(num 1731251732063) 2024-11-10T15:15:34,433 DEBUG [RS:2;b1c88e26310d:39231 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:15:34,433 INFO [RS:2;b1c88e26310d:39231 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T15:15:34,433 INFO [RS:2;b1c88e26310d:39231 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T15:15:34,433 INFO [RS:2;b1c88e26310d:39231 {}] hbase.ChoreService(370): Chore service for: regionserver/b1c88e26310d:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-10T15:15:34,433 INFO [RS:2;b1c88e26310d:39231 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T15:15:34,433 INFO [regionserver/b1c88e26310d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-10T15:15:34,433 INFO [RS:2;b1c88e26310d:39231 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39231 2024-11-10T15:15:34,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T15:15:34,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39231-0x1010272dd660003, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b1c88e26310d,39231,1731251730735 2024-11-10T15:15:34,438 INFO [RS:2;b1c88e26310d:39231 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T15:15:34,440 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b1c88e26310d,39231,1731251730735] 2024-11-10T15:15:34,441 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b1c88e26310d,39231,1731251730735 already deleted, retry=false 2024-11-10T15:15:34,441 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b1c88e26310d,39231,1731251730735 expired; onlineServers=0 2024-11-10T15:15:34,441 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'b1c88e26310d,45947,1731251729890' ***** 2024-11-10T15:15:34,441 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-10T15:15:34,441 INFO [M:0;b1c88e26310d:45947 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T15:15:34,442 INFO [M:0;b1c88e26310d:45947 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T15:15:34,442 DEBUG [M:0;b1c88e26310d:45947 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-10T15:15:34,442 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-10T15:15:34,442 DEBUG [M:0;b1c88e26310d:45947 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-10T15:15:34,442 DEBUG [master/b1c88e26310d:0:becomeActiveMaster-HFileCleaner.large.0-1731251731712 {}] cleaner.HFileCleaner(306): Exit Thread[master/b1c88e26310d:0:becomeActiveMaster-HFileCleaner.large.0-1731251731712,5,FailOnTimeoutGroup] 2024-11-10T15:15:34,442 DEBUG [master/b1c88e26310d:0:becomeActiveMaster-HFileCleaner.small.0-1731251731714 {}] cleaner.HFileCleaner(306): Exit Thread[master/b1c88e26310d:0:becomeActiveMaster-HFileCleaner.small.0-1731251731714,5,FailOnTimeoutGroup] 2024-11-10T15:15:34,442 INFO [M:0;b1c88e26310d:45947 {}] hbase.ChoreService(370): Chore service for: master/b1c88e26310d:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-10T15:15:34,443 INFO [M:0;b1c88e26310d:45947 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T15:15:34,443 DEBUG [M:0;b1c88e26310d:45947 {}] master.HMaster(1795): Stopping service threads 2024-11-10T15:15:34,443 INFO [M:0;b1c88e26310d:45947 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-10T15:15:34,443 INFO [M:0;b1c88e26310d:45947 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T15:15:34,443 INFO [M:0;b1c88e26310d:45947 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-10T15:15:34,443 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-10T15:15:34,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-10T15:15:34,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:34,445 DEBUG [M:0;b1c88e26310d:45947 {}] zookeeper.ZKUtil(347): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-10T15:15:34,445 WARN [M:0;b1c88e26310d:45947 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-10T15:15:34,446 INFO [M:0;b1c88e26310d:45947 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/.lastflushedseqids 2024-11-10T15:15:34,455 WARN [M:0;b1c88e26310d:45947 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:15:34,455 WARN [M:0;b1c88e26310d:45947 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-10T15:15:34,459 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2475908_22 at /127.0.0.1:57358 [Receiving block BP-100759265-172.17.0.2-1731251726743:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:43119:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57358 dst: /127.0.0.1:43119 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:15:34,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43119 is added to blk_-9223372036854775584_1033 (size=127) 2024-11-10T15:15:34,463 WARN [M:0;b1c88e26310d:45947 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-10T15:15:34,463 INFO [M:0;b1c88e26310d:45947 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-10T15:15:34,463 INFO [M:0;b1c88e26310d:45947 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-10T15:15:34,464 DEBUG [M:0;b1c88e26310d:45947 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T15:15:34,464 INFO [M:0;b1c88e26310d:45947 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T15:15:34,464 DEBUG [M:0;b1c88e26310d:45947 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T15:15:34,464 DEBUG [M:0;b1c88e26310d:45947 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T15:15:34,464 DEBUG [M:0;b1c88e26310d:45947 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-10T15:15:34,464 INFO [M:0;b1c88e26310d:45947 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.85 KB heapSize=34.13 KB 2024-11-10T15:15:34,483 DEBUG [M:0;b1c88e26310d:45947 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/27738bcae2a048e582b61c53fae4d8f1 is 82, key is hbase:meta,,1/info:regioninfo/1731251732549/Put/seqid=0 2024-11-10T15:15:34,486 WARN [M:0;b1c88e26310d:45947 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:15:34,486 WARN [M:0;b1c88e26310d:45947 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:15:34,488 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2475908_22 at /127.0.0.1:57218 [Receiving block BP-100759265-172.17.0.2-1731251726743:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:39423:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57218 dst: /127.0.0.1:39423 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:15:34,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39423 is added to blk_-9223372036854775568_1035 (size=5672) 2024-11-10T15:15:34,493 WARN [M:0;b1c88e26310d:45947 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-10T15:15:34,493 INFO [M:0;b1c88e26310d:45947 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/27738bcae2a048e582b61c53fae4d8f1 2024-11-10T15:15:34,521 DEBUG [M:0;b1c88e26310d:45947 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/681d7e80f57a41059632791b80ccd800 is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731251733333/Put/seqid=0 2024-11-10T15:15:34,524 WARN [M:0;b1c88e26310d:45947 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:15:34,524 WARN [M:0;b1c88e26310d:45947 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:15:34,527 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2475908_22 at /127.0.0.1:57386 [Receiving block BP-100759265-172.17.0.2-1731251726743:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:43119:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57386 dst: /127.0.0.1:43119 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-10T15:15:34,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38035-0x1010272dd660002, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:15:34,530 INFO [RS:1;b1c88e26310d:38035 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T15:15:34,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38035-0x1010272dd660002, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:15:34,530 INFO [RS:1;b1c88e26310d:38035 {}] regionserver.HRegionServer(1031): Exiting; stopping=b1c88e26310d,38035,1731251730684; zookeeper connection closed. 2024-11-10T15:15:34,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43119 is added to blk_-9223372036854775552_1037 (size=6441) 2024-11-10T15:15:34,531 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@32f6b034 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@32f6b034 2024-11-10T15:15:34,531 WARN [M:0;b1c88e26310d:45947 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-10T15:15:34,532 INFO [M:0;b1c88e26310d:45947 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.17 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/681d7e80f57a41059632791b80ccd800 2024-11-10T15:15:34,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39231-0x1010272dd660003, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:15:34,540 INFO [RS:2;b1c88e26310d:39231 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T15:15:34,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39231-0x1010272dd660003, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:15:34,540 INFO [RS:2;b1c88e26310d:39231 {}] regionserver.HRegionServer(1031): Exiting; stopping=b1c88e26310d,39231,1731251730735; zookeeper connection closed. 2024-11-10T15:15:34,540 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4a4d510f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4a4d510f 2024-11-10T15:15:34,541 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-10T15:15:34,556 DEBUG [M:0;b1c88e26310d:45947 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a9ebc818725d49b2bfb75d11cac2c0ca is 69, key is b1c88e26310d,38035,1731251730684/rs:state/1731251731796/Put/seqid=0 2024-11-10T15:15:34,558 WARN [M:0;b1c88e26310d:45947 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-10T15:15:34,558 WARN [M:0;b1c88e26310d:45947 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:15:34,561 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2475908_22 at /127.0.0.1:50944 [Receiving block BP-100759265-172.17.0.2-1731251726743:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:42089:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50944 dst: /127.0.0.1:42089 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:15:34,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42089 is added to blk_-9223372036854775536_1039 (size=5294) 2024-11-10T15:15:34,565 WARN [M:0;b1c88e26310d:45947 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-10T15:15:34,565 INFO [M:0;b1c88e26310d:45947 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a9ebc818725d49b2bfb75d11cac2c0ca 2024-11-10T15:15:34,575 DEBUG [M:0;b1c88e26310d:45947 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/27738bcae2a048e582b61c53fae4d8f1 as hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/27738bcae2a048e582b61c53fae4d8f1 2024-11-10T15:15:34,583 INFO [M:0;b1c88e26310d:45947 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/27738bcae2a048e582b61c53fae4d8f1, entries=8, sequenceid=72, filesize=5.5 K 2024-11-10T15:15:34,584 DEBUG [M:0;b1c88e26310d:45947 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/681d7e80f57a41059632791b80ccd800 as hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/681d7e80f57a41059632791b80ccd800 2024-11-10T15:15:34,595 INFO [M:0;b1c88e26310d:45947 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/681d7e80f57a41059632791b80ccd800, entries=8, sequenceid=72, filesize=6.3 K 2024-11-10T15:15:34,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42089 is added to blk_-9223372036854775773_1004 (size=42) 2024-11-10T15:15:34,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39423 is added to blk_-9223372036854775757_1006 (size=196) 2024-11-10T15:15:34,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42089 is added to blk_-9223372036854775756_1006 (size=196) 2024-11-10T15:15:34,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39423 is added to blk_-9223372036854775709_1013 (size=1321) 2024-11-10T15:15:34,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42089 is added to blk_-9223372036854775708_1013 (size=1321) 2024-11-10T15:15:34,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39423 is added to blk_-9223372036854775772_1004 (size=42) 2024-11-10T15:15:34,598 DEBUG [M:0;b1c88e26310d:45947 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a9ebc818725d49b2bfb75d11cac2c0ca as 
hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a9ebc818725d49b2bfb75d11cac2c0ca 2024-11-10T15:15:34,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42089 is added to blk_-9223372036854775741_1008 (size=1189) 2024-11-10T15:15:34,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39423 is added to blk_-9223372036854775740_1008 (size=1189) 2024-11-10T15:15:34,605 INFO [M:0;b1c88e26310d:45947 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a9ebc818725d49b2bfb75d11cac2c0ca, entries=3, sequenceid=72, filesize=5.2 K 2024-11-10T15:15:34,607 INFO [M:0;b1c88e26310d:45947 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.85 KB/27492, heapSize ~33.84 KB/34648, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 143ms, sequenceid=72, compaction requested=false 2024-11-10T15:15:34,609 INFO [M:0;b1c88e26310d:45947 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T15:15:34,609 DEBUG [M:0;b1c88e26310d:45947 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731251734464Disabling compacts and flushes for region at 1731251734464Disabling writes for close at 1731251734464Obtaining lock to block concurrent updates at 1731251734464Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731251734464Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27492, getHeapSize=34888, getOffHeapSize=0, getCellsCount=85 at 1731251734464Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731251734465 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731251734465Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731251734483 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731251734483Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731251734502 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731251734521 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731251734521Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731251734540 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731251734556 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731251734556Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6bb33d15: reopening flushed file at 1731251734574 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@538e25: reopening flushed file at 1731251734583 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7c6af5b7: reopening flushed file at 1731251734596 (+13 ms)Finished flush of dataSize ~26.85 KB/27492, heapSize ~33.84 KB/34648, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 143ms, sequenceid=72, compaction requested=false at 1731251734607 (+11 ms)Writing region close event to WAL at 1731251734609 (+2 ms)Closed at 1731251734609 2024-11-10T15:15:34,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42089 is added to blk_1073741825_1011 (size=32695) 2024-11-10T15:15:34,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39423 is added to blk_1073741825_1011 (size=32695) 2024-11-10T15:15:34,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43119 is added to blk_1073741825_1011 (size=32695) 2024-11-10T15:15:34,613 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T15:15:34,613 INFO [M:0;b1c88e26310d:45947 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-10T15:15:34,613 INFO [M:0;b1c88e26310d:45947 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45947 2024-11-10T15:15:34,614 INFO [M:0;b1c88e26310d:45947 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T15:15:34,719 INFO [M:0;b1c88e26310d:45947 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T15:15:34,719 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:15:34,719 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45947-0x1010272dd660000, quorum=127.0.0.1:59868, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:15:34,723 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3114ae69{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T15:15:34,725 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3c70a874{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T15:15:34,725 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T15:15:34,726 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5822645a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T15:15:34,726 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16cd567f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/hadoop.log.dir/,STOPPED} 2024-11-10T15:15:34,728 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T15:15:34,728 WARN [BP-100759265-172.17.0.2-1731251726743 heartbeating to localhost/127.0.0.1:33323 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T15:15:34,728 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T15:15:34,728 WARN [BP-100759265-172.17.0.2-1731251726743 heartbeating to localhost/127.0.0.1:33323 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-100759265-172.17.0.2-1731251726743 (Datanode Uuid 327d567e-824f-4be2-b360-cac504a36acf) service to localhost/127.0.0.1:33323 2024-11-10T15:15:34,729 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/cluster_32013011-b160-e423-334f-0f3046a2abc0/data/data5/current/BP-100759265-172.17.0.2-1731251726743 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T15:15:34,730 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/cluster_32013011-b160-e423-334f-0f3046a2abc0/data/data6/current/BP-100759265-172.17.0.2-1731251726743 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T15:15:34,730 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T15:15:34,732 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@353955e9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T15:15:34,732 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11738cd8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T15:15:34,733 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T15:15:34,733 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40eb7053{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T15:15:34,733 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@510fec09{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/hadoop.log.dir/,STOPPED} 2024-11-10T15:15:34,734 WARN [BP-100759265-172.17.0.2-1731251726743 heartbeating to localhost/127.0.0.1:33323 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T15:15:34,734 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T15:15:34,734 WARN [BP-100759265-172.17.0.2-1731251726743 heartbeating to localhost/127.0.0.1:33323 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-100759265-172.17.0.2-1731251726743 (Datanode Uuid 2e40fac8-c13e-486f-8bc7-17b44f8dce3f) service to localhost/127.0.0.1:33323 2024-11-10T15:15:34,734 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T15:15:34,735 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/cluster_32013011-b160-e423-334f-0f3046a2abc0/data/data3/current/BP-100759265-172.17.0.2-1731251726743 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T15:15:34,735 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/cluster_32013011-b160-e423-334f-0f3046a2abc0/data/data4/current/BP-100759265-172.17.0.2-1731251726743 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T15:15:34,735 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T15:15:34,737 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1b97a472{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T15:15:34,738 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3722a29b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T15:15:34,738 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T15:15:34,738 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69893329{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T15:15:34,738 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a5de9e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/hadoop.log.dir/,STOPPED} 2024-11-10T15:15:34,739 WARN [BP-100759265-172.17.0.2-1731251726743 heartbeating to localhost/127.0.0.1:33323 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T15:15:34,739 WARN [BP-100759265-172.17.0.2-1731251726743 heartbeating to localhost/127.0.0.1:33323 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-100759265-172.17.0.2-1731251726743 (Datanode Uuid a2216ef1-02f6-440c-baa5-89e80dde5fef) service to localhost/127.0.0.1:33323 2024-11-10T15:15:34,739 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
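Each "Stopped ServerConnector@...{localhost:0}" entry refers to a Jetty connector that was originally bound to port 0, i.e. an ephemeral port chosen by the OS, which is why every mini HDFS web UI in this run sits on a different random port. A small Jetty 9.4 sketch of that binding, independent of the Hadoop/HBase wrappers that actually start these servers:

```java
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;

public final class EphemeralJettyExample {
  public static void main(String[] args) throws Exception {
    Server server = new Server();
    ServerConnector connector = new ServerConnector(server);
    connector.setHost("localhost");
    connector.setPort(0);                 // 0 = let the kernel pick a free port
    server.addConnector(connector);
    server.start();
    System.out.println("bound to " + connector.getLocalPort()); // real port known only after start()
    server.stop();                        // produces "Stopped ServerConnector@..." style messages
  }
}
```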
2024-11-10T15:15:34,740 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T15:15:34,740 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/cluster_32013011-b160-e423-334f-0f3046a2abc0/data/data1/current/BP-100759265-172.17.0.2-1731251726743 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T15:15:34,740 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/cluster_32013011-b160-e423-334f-0f3046a2abc0/data/data2/current/BP-100759265-172.17.0.2-1731251726743 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T15:15:34,741 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T15:15:34,750 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62d6efd9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T15:15:34,750 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@353d35a1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T15:15:34,750 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T15:15:34,750 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ce709a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T15:15:34,751 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@760c69c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/hadoop.log.dir/,STOPPED} 2024-11-10T15:15:34,759 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-10T15:15:34,787 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-10T15:15:34,795 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=86 (was 155), OpenFileDescriptor=439 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=61 (was 32) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=9238 (was 9522) 2024-11-10T15:15:34,801 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=86, OpenFileDescriptor=439, MaxFileDescriptor=1048576, SystemLoadAverage=61, ProcessCount=11, AvailableMemoryMB=9238 2024-11-10T15:15:34,801 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-10T15:15:34,802 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/hadoop.log.dir so I do NOT create it in target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d 2024-11-10T15:15:34,802 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e5e9089-75d7-6ba0-d344-0b39ed43b2f0/hadoop.tmp.dir so I do NOT create it in target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d 2024-11-10T15:15:34,802 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/cluster_442b5a45-9d9f-de6e-9655-8cc3fcc1edb7, deleteOnExit=true 2024-11-10T15:15:34,802 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-10T15:15:34,802 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/test.cache.data in system properties and HBase conf 2024-11-10T15:15:34,802 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/hadoop.tmp.dir in system properties and HBase conf 2024-11-10T15:15:34,802 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/hadoop.log.dir in system properties and HBase conf 2024-11-10T15:15:34,803 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-10T15:15:34,803 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-10T15:15:34,803 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-10T15:15:34,803 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-10T15:15:34,803 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-10T15:15:34,803 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-10T15:15:34,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-10T15:15:34,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T15:15:34,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-10T15:15:34,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-10T15:15:34,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T15:15:34,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T15:15:34,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-10T15:15:34,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/nfs.dump.dir in system properties and HBase conf 2024-11-10T15:15:34,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/java.io.tmpdir in system properties and HBase conf 2024-11-10T15:15:34,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T15:15:34,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-10T15:15:34,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-10T15:15:34,894 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T15:15:34,899 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T15:15:34,901 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T15:15:34,901 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T15:15:34,901 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T15:15:34,901 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T15:15:34,902 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@130ce80d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/hadoop.log.dir/,AVAILABLE} 2024-11-10T15:15:34,902 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1921d73d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T15:15:35,020 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7d30adc2{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/java.io.tmpdir/jetty-localhost-44207-hadoop-hdfs-3_4_1-tests_jar-_-any-14015892008740146395/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T15:15:35,021 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3df483b8{HTTP/1.1, (http/1.1)}{localhost:44207} 2024-11-10T15:15:35,021 INFO [Time-limited test {}] server.Server(415): Started @10239ms 2024-11-10T15:15:35,111 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T15:15:35,115 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T15:15:35,116 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T15:15:35,116 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T15:15:35,116 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T15:15:35,116 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6980ca59{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/hadoop.log.dir/,AVAILABLE} 2024-11-10T15:15:35,117 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c867c3b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T15:15:35,232 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@17794d45{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/java.io.tmpdir/jetty-localhost-39985-hadoop-hdfs-3_4_1-tests_jar-_-any-11045100768903536578/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T15:15:35,232 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@36d1058{HTTP/1.1, (http/1.1)}{localhost:39985} 2024-11-10T15:15:35,233 INFO [Time-limited test {}] server.Server(415): Started @10451ms 2024-11-10T15:15:35,234 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T15:15:35,270 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T15:15:35,273 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T15:15:35,275 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T15:15:35,275 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T15:15:35,275 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T15:15:35,276 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ed2e3e6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/hadoop.log.dir/,AVAILABLE} 2024-11-10T15:15:35,276 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a71642{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T15:15:35,332 WARN [Thread-520 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/cluster_442b5a45-9d9f-de6e-9655-8cc3fcc1edb7/data/data1/current/BP-616955154-172.17.0.2-1731251734838/current, will proceed with Du for space computation calculation, 2024-11-10T15:15:35,332 WARN [Thread-521 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/cluster_442b5a45-9d9f-de6e-9655-8cc3fcc1edb7/data/data2/current/BP-616955154-172.17.0.2-1731251734838/current, will proceed with Du for space computation calculation, 2024-11-10T15:15:35,349 WARN [Thread-499 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-10T15:15:35,353 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x444732903473ad52 with lease ID 0xb3f7ddbdd8fda60f: Processing first storage report for DS-6602cd59-2cd9-4ec2-9fd3-d8fb33e7a923 from datanode DatanodeRegistration(127.0.0.1:38487, datanodeUuid=f9220acd-c87e-44b6-bebb-62f5e956296e, infoPort=46067, infoSecurePort=0, ipcPort=40311, storageInfo=lv=-57;cid=testClusterID;nsid=541182265;c=1731251734838) 2024-11-10T15:15:35,353 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x444732903473ad52 with lease ID 0xb3f7ddbdd8fda60f: from storage DS-6602cd59-2cd9-4ec2-9fd3-d8fb33e7a923 node DatanodeRegistration(127.0.0.1:38487, datanodeUuid=f9220acd-c87e-44b6-bebb-62f5e956296e, infoPort=46067, infoSecurePort=0, ipcPort=40311, storageInfo=lv=-57;cid=testClusterID;nsid=541182265;c=1731251734838), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-10T15:15:35,353 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x444732903473ad52 with lease ID 0xb3f7ddbdd8fda60f: Processing first storage report for DS-1eae6b90-296a-46a6-a3e5-e3539e824f36 from datanode DatanodeRegistration(127.0.0.1:38487, datanodeUuid=f9220acd-c87e-44b6-bebb-62f5e956296e, infoPort=46067, infoSecurePort=0, ipcPort=40311, storageInfo=lv=-57;cid=testClusterID;nsid=541182265;c=1731251734838) 2024-11-10T15:15:35,353 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x444732903473ad52 with lease ID 0xb3f7ddbdd8fda60f: from storage DS-1eae6b90-296a-46a6-a3e5-e3539e824f36 node DatanodeRegistration(127.0.0.1:38487, datanodeUuid=f9220acd-c87e-44b6-bebb-62f5e956296e, infoPort=46067, infoSecurePort=0, ipcPort=40311, storageInfo=lv=-57;cid=testClusterID;nsid=541182265;c=1731251734838), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T15:15:35,397 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@365855b2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/java.io.tmpdir/jetty-localhost-33923-hadoop-hdfs-3_4_1-tests_jar-_-any-14860054850611909697/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T15:15:35,398 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7b9c4315{HTTP/1.1, (http/1.1)}{localhost:33923} 2024-11-10T15:15:35,398 INFO [Time-limited test {}] server.Server(415): Started @10616ms 2024-11-10T15:15:35,399 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T15:15:35,430 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T15:15:35,434 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T15:15:35,434 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T15:15:35,435 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T15:15:35,435 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T15:15:35,435 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@381275e5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/hadoop.log.dir/,AVAILABLE} 2024-11-10T15:15:35,436 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3beb2b8e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T15:15:35,516 WARN [Thread-556 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/cluster_442b5a45-9d9f-de6e-9655-8cc3fcc1edb7/data/data4/current/BP-616955154-172.17.0.2-1731251734838/current, will proceed with Du for space computation calculation, 2024-11-10T15:15:35,516 WARN [Thread-555 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/cluster_442b5a45-9d9f-de6e-9655-8cc3fcc1edb7/data/data3/current/BP-616955154-172.17.0.2-1731251734838/current, will proceed with Du for space computation calculation, 2024-11-10T15:15:35,533 WARN [Thread-535 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-10T15:15:35,536 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdf1a962559a80ed9 with lease ID 0xb3f7ddbdd8fda610: Processing first storage report for DS-abe90634-cd99-4dc2-b023-6dec7c5fe64d from datanode DatanodeRegistration(127.0.0.1:37183, datanodeUuid=36c55064-dfbb-4619-85fb-6331386546c4, infoPort=35261, infoSecurePort=0, ipcPort=42963, storageInfo=lv=-57;cid=testClusterID;nsid=541182265;c=1731251734838) 2024-11-10T15:15:35,536 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdf1a962559a80ed9 with lease ID 0xb3f7ddbdd8fda610: from storage DS-abe90634-cd99-4dc2-b023-6dec7c5fe64d node DatanodeRegistration(127.0.0.1:37183, datanodeUuid=36c55064-dfbb-4619-85fb-6331386546c4, infoPort=35261, infoSecurePort=0, ipcPort=42963, storageInfo=lv=-57;cid=testClusterID;nsid=541182265;c=1731251734838), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T15:15:35,536 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdf1a962559a80ed9 with lease ID 0xb3f7ddbdd8fda610: Processing first storage report for DS-78898e16-5e65-4109-9d0d-3a2b496f95ec from datanode DatanodeRegistration(127.0.0.1:37183, datanodeUuid=36c55064-dfbb-4619-85fb-6331386546c4, infoPort=35261, infoSecurePort=0, ipcPort=42963, storageInfo=lv=-57;cid=testClusterID;nsid=541182265;c=1731251734838) 2024-11-10T15:15:35,536 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdf1a962559a80ed9 with lease ID 0xb3f7ddbdd8fda610: from storage DS-78898e16-5e65-4109-9d0d-3a2b496f95ec node DatanodeRegistration(127.0.0.1:37183, datanodeUuid=36c55064-dfbb-4619-85fb-6331386546c4, infoPort=35261, infoSecurePort=0, ipcPort=42963, storageInfo=lv=-57;cid=testClusterID;nsid=541182265;c=1731251734838), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T15:15:35,567 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@523e382d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/java.io.tmpdir/jetty-localhost-46883-hadoop-hdfs-3_4_1-tests_jar-_-any-10919716683037405662/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T15:15:35,568 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5cdad191{HTTP/1.1, (http/1.1)}{localhost:46883} 2024-11-10T15:15:35,568 INFO [Time-limited test {}] server.Server(415): Started @10786ms 2024-11-10T15:15:35,569 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
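The startup walked through above follows the option string logged earlier ("Starting up minicluster with option: StartMiniClusterOption{numMasters=1, ... numRegionServers=3, ... numDataNodes=3, ... numZkServers=1, ...}"), then "STARTING DFS" and the datanode/Jetty lines in between, while "Minicluster is down" earlier marks the matching teardown. A JUnit 4 sketch of how a test typically drives this utility; the builder and method names are assumptions inferred from those log messages, so the exact signatures should be checked against the branch-3 sources:

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleExample {
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // fields mirror the option string in the log: 1 master, 3 region servers, 3 datanodes, 1 ZK server
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(3)
        .numDataNodes(3)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);   // "Starting up minicluster with option: ..." / "STARTING DFS"
  }

  @Test
  public void testSomething() throws Exception {
    // exercise the cluster, e.g. create a table and write a few rows
  }

  @After
  public void tearDown() throws Exception {
    util.shutdownMiniCluster();      // produces the "Minicluster is down" message seen earlier
  }
}
```

With three datanodes and three region servers requested, the three block-report and DataNode web-UI sequences above are exactly what the startup should emit once per datanode.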
2024-11-10T15:15:35,671 WARN [Thread-582 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/cluster_442b5a45-9d9f-de6e-9655-8cc3fcc1edb7/data/data6/current/BP-616955154-172.17.0.2-1731251734838/current, will proceed with Du for space computation calculation, 2024-11-10T15:15:35,671 WARN [Thread-581 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/cluster_442b5a45-9d9f-de6e-9655-8cc3fcc1edb7/data/data5/current/BP-616955154-172.17.0.2-1731251734838/current, will proceed with Du for space computation calculation, 2024-11-10T15:15:35,693 WARN [Thread-570 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T15:15:35,696 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x893d098124132a7f with lease ID 0xb3f7ddbdd8fda611: Processing first storage report for DS-a12c0518-fa80-42ab-ad0e-e7cebef66546 from datanode DatanodeRegistration(127.0.0.1:33377, datanodeUuid=79f01456-2dab-42b3-8901-16231fc8a479, infoPort=46061, infoSecurePort=0, ipcPort=35947, storageInfo=lv=-57;cid=testClusterID;nsid=541182265;c=1731251734838) 2024-11-10T15:15:35,696 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x893d098124132a7f with lease ID 0xb3f7ddbdd8fda611: from storage DS-a12c0518-fa80-42ab-ad0e-e7cebef66546 node DatanodeRegistration(127.0.0.1:33377, datanodeUuid=79f01456-2dab-42b3-8901-16231fc8a479, infoPort=46061, infoSecurePort=0, ipcPort=35947, storageInfo=lv=-57;cid=testClusterID;nsid=541182265;c=1731251734838), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T15:15:35,696 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x893d098124132a7f with lease ID 0xb3f7ddbdd8fda611: Processing first storage report for DS-26fcfd9b-48aa-4b9d-9e07-8790f7f42fec from datanode DatanodeRegistration(127.0.0.1:33377, datanodeUuid=79f01456-2dab-42b3-8901-16231fc8a479, infoPort=46061, infoSecurePort=0, ipcPort=35947, storageInfo=lv=-57;cid=testClusterID;nsid=541182265;c=1731251734838) 2024-11-10T15:15:35,696 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x893d098124132a7f with lease ID 0xb3f7ddbdd8fda611: from storage DS-26fcfd9b-48aa-4b9d-9e07-8790f7f42fec node DatanodeRegistration(127.0.0.1:33377, datanodeUuid=79f01456-2dab-42b3-8901-16231fc8a479, infoPort=46061, infoSecurePort=0, ipcPort=35947, storageInfo=lv=-57;cid=testClusterID;nsid=541182265;c=1731251734838), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T15:15:35,795 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d 2024-11-10T15:15:35,798 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/cluster_442b5a45-9d9f-de6e-9655-8cc3fcc1edb7/zookeeper_0, clientPort=58757, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/cluster_442b5a45-9d9f-de6e-9655-8cc3fcc1edb7/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/cluster_442b5a45-9d9f-de6e-9655-8cc3fcc1edb7/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-10T15:15:35,799 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58757 2024-11-10T15:15:35,799 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:15:35,801 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:15:35,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741825_1001 (size=7) 2024-11-10T15:15:35,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38487 is added to blk_1073741825_1001 (size=7) 2024-11-10T15:15:35,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37183 is added to blk_1073741825_1001 (size=7) 2024-11-10T15:15:35,816 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8 with version=8 2024-11-10T15:15:35,816 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33323/user/jenkins/test-data/b3ef666d-4179-852d-810b-109e0bf256c9/hbase-staging 2024-11-10T15:15:35,818 INFO [Time-limited test {}] client.ConnectionUtils(128): master/b1c88e26310d:0 server-side Connection retries=45 2024-11-10T15:15:35,818 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:15:35,818 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T15:15:35,818 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T15:15:35,818 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:15:35,818 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T15:15:35,818 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, 
hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-10T15:15:35,819 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T15:15:35,820 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33459 2024-11-10T15:15:35,821 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33459 connecting to ZooKeeper ensemble=127.0.0.1:58757 2024-11-10T15:15:35,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:334590x0, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T15:15:35,829 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33459-0x1010272f7850000 connected 2024-11-10T15:15:35,845 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:15:35,847 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:15:35,848 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:15:35,849 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8, hbase.cluster.distributed=false 2024-11-10T15:15:35,850 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T15:15:35,850 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33459 2024-11-10T15:15:35,851 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33459 2024-11-10T15:15:35,851 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33459 2024-11-10T15:15:35,851 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33459 2024-11-10T15:15:35,852 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33459 2024-11-10T15:15:35,868 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b1c88e26310d:0 server-side Connection retries=45 2024-11-10T15:15:35,868 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:15:35,868 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T15:15:35,868 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T15:15:35,868 INFO [Time-limited test 
{}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:15:35,868 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T15:15:35,868 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T15:15:35,868 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T15:15:35,869 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45831 2024-11-10T15:15:35,870 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45831 connecting to ZooKeeper ensemble=127.0.0.1:58757 2024-11-10T15:15:35,871 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:15:35,873 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:15:35,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:458310x0, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T15:15:35,878 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45831-0x1010272f7850001 connected 2024-11-10T15:15:35,878 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45831-0x1010272f7850001, quorum=127.0.0.1:58757, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:15:35,878 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T15:15:35,879 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T15:15:35,879 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45831-0x1010272f7850001, quorum=127.0.0.1:58757, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T15:15:35,880 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45831-0x1010272f7850001, quorum=127.0.0.1:58757, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T15:15:35,881 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45831 2024-11-10T15:15:35,881 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45831 2024-11-10T15:15:35,881 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45831 2024-11-10T15:15:35,881 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45831 2024-11-10T15:15:35,882 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45831 2024-11-10T15:15:35,896 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b1c88e26310d:0 server-side Connection retries=45 2024-11-10T15:15:35,896 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:15:35,896 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T15:15:35,896 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T15:15:35,896 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:15:35,896 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T15:15:35,897 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T15:15:35,897 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T15:15:35,897 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36773 2024-11-10T15:15:35,898 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36773 connecting to ZooKeeper ensemble=127.0.0.1:58757 2024-11-10T15:15:35,899 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:15:35,900 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:15:35,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:367730x0, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T15:15:35,905 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36773-0x1010272f7850002 connected 2024-11-10T15:15:35,905 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36773-0x1010272f7850002, quorum=127.0.0.1:58757, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:15:35,905 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T15:15:35,905 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T15:15:35,906 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36773-0x1010272f7850002, quorum=127.0.0.1:58757, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 
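The repeated "Set watcher on znode that does not yet exist" lines (for /hbase/running, /hbase/master, /hbase/acl) come from registering a watch on a path that has not been created yet, which plain ZooKeeper supports directly: exists() returns null for an absent node but still leaves a watch behind, so the caller is notified by a later NodeCreated event. A minimal sketch; HBase's ZKUtil wraps this pattern, and the helper below is illustrative rather than the ZKUtil implementation:

```java
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

final class ZnodeWatchExample {
  /** Returns whether the znode exists now; either way, a watch is left on the path. */
  static boolean watchAndCheckExists(ZooKeeper zk, String znode)
      throws KeeperException, InterruptedException {
    Stat stat = zk.exists(znode, true); // true = notify the session's default watcher
    return stat != null;                // false: absent for now, but NodeCreated will fire later
  }
}
```

This is why, a few entries later, the region servers receive NodeCreated for /hbase/master as soon as the master registers itself.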
2024-11-10T15:15:35,907 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36773-0x1010272f7850002, quorum=127.0.0.1:58757, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T15:15:35,908 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36773 2024-11-10T15:15:35,908 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36773 2024-11-10T15:15:35,908 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36773 2024-11-10T15:15:35,908 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36773 2024-11-10T15:15:35,909 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36773 2024-11-10T15:15:35,923 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b1c88e26310d:0 server-side Connection retries=45 2024-11-10T15:15:35,923 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:15:35,923 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T15:15:35,923 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T15:15:35,923 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:15:35,923 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T15:15:35,923 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T15:15:35,923 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T15:15:35,924 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36993 2024-11-10T15:15:35,925 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36993 connecting to ZooKeeper ensemble=127.0.0.1:58757 2024-11-10T15:15:35,926 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:15:35,927 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:15:35,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:369930x0, quorum=127.0.0.1:58757, 
baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T15:15:35,932 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36993-0x1010272f7850003 connected 2024-11-10T15:15:35,932 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36993-0x1010272f7850003, quorum=127.0.0.1:58757, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:15:35,932 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T15:15:35,933 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T15:15:35,934 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36993-0x1010272f7850003, quorum=127.0.0.1:58757, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T15:15:35,935 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36993-0x1010272f7850003, quorum=127.0.0.1:58757, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T15:15:35,935 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36993 2024-11-10T15:15:35,935 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36993 2024-11-10T15:15:35,938 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36993 2024-11-10T15:15:35,938 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36993 2024-11-10T15:15:35,939 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36993 2024-11-10T15:15:35,952 DEBUG [M:0;b1c88e26310d:33459 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b1c88e26310d:33459 2024-11-10T15:15:35,952 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/b1c88e26310d,33459,1731251735818 2024-11-10T15:15:35,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:15:35,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36993-0x1010272f7850003, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:15:35,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36773-0x1010272f7850002, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:15:35,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45831-0x1010272f7850001, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:15:35,958 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33459-0x1010272f7850000, 
quorum=127.0.0.1:58757, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/b1c88e26310d,33459,1731251735818 2024-11-10T15:15:35,960 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36993-0x1010272f7850003, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T15:15:35,960 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45831-0x1010272f7850001, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T15:15:35,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36993-0x1010272f7850003, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:35,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36773-0x1010272f7850002, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T15:15:35,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45831-0x1010272f7850001, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:35,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:35,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36773-0x1010272f7850002, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:35,961 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-10T15:15:35,962 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b1c88e26310d,33459,1731251735818 from backup master directory 2024-11-10T15:15:35,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b1c88e26310d,33459,1731251735818 2024-11-10T15:15:35,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36993-0x1010272f7850003, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:15:35,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36773-0x1010272f7850002, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:15:35,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:15:35,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45831-0x1010272f7850001, 
quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:15:35,964 WARN [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-10T15:15:35,964 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b1c88e26310d,33459,1731251735818 2024-11-10T15:15:35,970 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/hbase.id] with ID: d5a33c38-bcd5-484b-86de-3cd40690de8c 2024-11-10T15:15:35,970 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/.tmp/hbase.id 2024-11-10T15:15:35,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37183 is added to blk_1073741826_1002 (size=42) 2024-11-10T15:15:35,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38487 is added to blk_1073741826_1002 (size=42) 2024-11-10T15:15:35,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741826_1002 (size=42) 2024-11-10T15:15:35,980 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/.tmp/hbase.id]:[hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/hbase.id] 2024-11-10T15:15:35,996 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:15:35,996 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-10T15:15:35,997 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
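[Annotation] The cluster ID bootstrap above writes hbase.id to a .tmp location first and then moves it into place, so readers never observe a partially written file. A minimal sketch of that write-temp-then-rename pattern with the Hadoop FileSystem API; the NameNode URI and the /hbase paths below are simplified placeholders, not the jenkins test-data paths from this run (only the UUID is taken from the log).

```java
import java.io.IOException;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical NameNode address; the test above uses a random local port (42151).
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);

    Path tmp = new Path("/hbase/.tmp/hbase.id"); // temporary location
    Path dst = new Path("/hbase/hbase.id");      // final location

    // 1) Write the ID to the temporary file first.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("d5a33c38-bcd5-484b-86de-3cd40690de8c".getBytes(StandardCharsets.UTF_8));
    }
    // 2) Move it to the target path in a single metadata operation.
    if (!fs.rename(tmp, dst)) {
      throw new IOException("rename failed: " + tmp + " -> " + dst);
    }
  }
}
```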
2024-11-10T15:15:36,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36993-0x1010272f7850003, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:36,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45831-0x1010272f7850001, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:36,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:36,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36773-0x1010272f7850002, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:36,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38487 is added to blk_1073741827_1003 (size=196) 2024-11-10T15:15:36,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37183 is added to blk_1073741827_1003 (size=196) 2024-11-10T15:15:36,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741827_1003 (size=196) 2024-11-10T15:15:36,011 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T15:15:36,012 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-10T15:15:36,013 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T15:15:36,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is 
added to blk_1073741828_1004 (size=1189) 2024-11-10T15:15:36,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38487 is added to blk_1073741828_1004 (size=1189) 2024-11-10T15:15:36,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37183 is added to blk_1073741828_1004 (size=1189) 2024-11-10T15:15:36,026 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/MasterData/data/master/store 2024-11-10T15:15:36,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37183 is added to blk_1073741829_1005 (size=34) 2024-11-10T15:15:36,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741829_1005 (size=34) 2024-11-10T15:15:36,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38487 is added to blk_1073741829_1005 (size=34) 2024-11-10T15:15:36,039 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T15:15:36,039 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T15:15:36,039 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T15:15:36,039 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
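[Annotation] The master:store descriptor above spells out per-family settings: 8 KB in-memory ROWCOL/ROW_INDEX_V1 blocks for 'info', and plain 64 KB ROW-bloom families for 'proc', 'rs', and 'state'. For reference, a comparable descriptor can be assembled with the public client API; the table name below is illustrative, not the internal master:store table, and only two of the four families are shown.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static void main(String[] args) {
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_store"))   // hypothetical table name
        // Mirrors the 'info' family: 3 versions, 8 KB blocks, in-memory,
        // ROWCOL bloom filter, ROW_INDEX_V1 data block encoding.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBlocksize(8192)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .build())
        // Mirrors 'proc'/'rs'/'state': 1 version, 64 KB blocks, ROW bloom filter.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBlocksize(65536)
            .setBloomFilterType(BloomType.ROW)
            .build())
        .build();
    System.out.println(td);
  }
}
```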
2024-11-10T15:15:36,039 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T15:15:36,039 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T15:15:36,039 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T15:15:36,039 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731251736039Disabling compacts and flushes for region at 1731251736039Disabling writes for close at 1731251736039Writing region close event to WAL at 1731251736039Closed at 1731251736039 2024-11-10T15:15:36,040 WARN [master/b1c88e26310d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/MasterData/data/master/store/.initializing 2024-11-10T15:15:36,041 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/MasterData/WALs/b1c88e26310d,33459,1731251735818 2024-11-10T15:15:36,044 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b1c88e26310d%2C33459%2C1731251735818, suffix=, logDir=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/MasterData/WALs/b1c88e26310d,33459,1731251735818, archiveDir=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/MasterData/oldWALs, maxLogs=10 2024-11-10T15:15:36,045 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor b1c88e26310d%2C33459%2C1731251735818.1731251736045 2024-11-10T15:15:36,055 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/MasterData/WALs/b1c88e26310d,33459,1731251735818/b1c88e26310d%2C33459%2C1731251735818.1731251736045 2024-11-10T15:15:36,056 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46067:46067),(127.0.0.1/127.0.0.1:35261:35261),(127.0.0.1/127.0.0.1:46061:46061)] 2024-11-10T15:15:36,057 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-10T15:15:36,057 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T15:15:36,057 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:15:36,057 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:15:36,059 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:15:36,061 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-10T15:15:36,061 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:15:36,062 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:15:36,062 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:15:36,063 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-10T15:15:36,063 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:15:36,064 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T15:15:36,064 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:15:36,066 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-10T15:15:36,066 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:15:36,067 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T15:15:36,067 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:15:36,068 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-10T15:15:36,069 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:15:36,069 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T15:15:36,069 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:15:36,070 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:15:36,070 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:15:36,072 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:15:36,072 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:15:36,072 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-10T15:15:36,074 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:15:36,076 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T15:15:36,077 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68140848, jitterRate=0.01537775993347168}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T15:15:36,077 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731251736058Initializing all the Stores at 1731251736059 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731251736059Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731251736059Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731251736059Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731251736059Cleaning up temporary data from old regions at 1731251736072 (+13 ms)Region opened successfully at 1731251736077 (+5 ms) 2024-11-10T15:15:36,078 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-10T15:15:36,082 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25d627b5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b1c88e26310d/172.17.0.2:0 2024-11-10T15:15:36,083 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-10T15:15:36,083 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-10T15:15:36,083 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-10T15:15:36,083 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-10T15:15:36,083 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-10T15:15:36,084 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-10T15:15:36,084 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-10T15:15:36,086 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
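[Annotation] The FlushLargeStoresPolicy line above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on master:store, so the lower bound falls back to the region memstore flush size divided by the number of families. The arithmetic, spelled out with the values from this log:

```java
public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    long flushSize = 134_217_728L; // flushSize from the MasterRegionFlusherAndCompactor lines (128 MB)
    int families = 4;              // info, proc, rs, state
    long lowerBound = flushSize / families;
    // Prints 33554432 (32 MB), matching flushSizeLowerBound=33554432 in the open journal above.
    System.out.println(lowerBound);
  }
}
```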
2024-11-10T15:15:36,087 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-10T15:15:36,089 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-10T15:15:36,089 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-10T15:15:36,090 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-10T15:15:36,091 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-10T15:15:36,092 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-10T15:15:36,092 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-10T15:15:36,094 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-10T15:15:36,094 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-10T15:15:36,096 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-10T15:15:36,098 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-10T15:15:36,099 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-10T15:15:36,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T15:15:36,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45831-0x1010272f7850001, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T15:15:36,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36773-0x1010272f7850002, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T15:15:36,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36993-0x1010272f7850003, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-11-10T15:15:36,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45831-0x1010272f7850001, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:36,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36773-0x1010272f7850002, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:36,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:36,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36993-0x1010272f7850003, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:36,102 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=b1c88e26310d,33459,1731251735818, sessionid=0x1010272f7850000, setting cluster-up flag (Was=false) 2024-11-10T15:15:36,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36993-0x1010272f7850003, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:36,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45831-0x1010272f7850001, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:36,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36773-0x1010272f7850002, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:36,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:36,111 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-10T15:15:36,112 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b1c88e26310d,33459,1731251735818 2024-11-10T15:15:36,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:36,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36773-0x1010272f7850002, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:36,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36993-0x1010272f7850003, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:36,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:45831-0x1010272f7850001, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:36,124 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-10T15:15:36,125 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b1c88e26310d,33459,1731251735818 2024-11-10T15:15:36,126 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-10T15:15:36,129 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-10T15:15:36,130 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-10T15:15:36,130 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
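[Annotation] The balancer lines above report the slop (0.2) and the StochasticLoadBalancer search budget (maxSteps, stepsPerRegion, maxRunningTime) as loaded from configuration. A hedged sketch of overriding those budgets programmatically; the property keys are the ones documented for HBase, but treat them as assumptions for this exact 3.0.0-beta-2 build, and the values are simply the ones echoed in the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Region-count imbalance tolerated before the balancer acts (slop=0.2 above).
    conf.setFloat("hbase.regions.slop", 0.2f);
    // StochasticLoadBalancer search budget, matching maxSteps / stepsPerRegion / maxRunningTime above.
    conf.setLong("hbase.master.balancer.stochastic.maxSteps", 1_000_000L);
    conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
    System.out.println(conf.get("hbase.master.balancer.stochastic.maxSteps"));
  }
}
```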
2024-11-10T15:15:36,130 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b1c88e26310d,33459,1731251735818 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-10T15:15:36,132 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b1c88e26310d:0, corePoolSize=5, maxPoolSize=5 2024-11-10T15:15:36,132 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b1c88e26310d:0, corePoolSize=5, maxPoolSize=5 2024-11-10T15:15:36,132 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b1c88e26310d:0, corePoolSize=5, maxPoolSize=5 2024-11-10T15:15:36,132 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b1c88e26310d:0, corePoolSize=5, maxPoolSize=5 2024-11-10T15:15:36,132 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b1c88e26310d:0, corePoolSize=10, maxPoolSize=10 2024-11-10T15:15:36,133 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,133 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/b1c88e26310d:0, corePoolSize=2, maxPoolSize=2 2024-11-10T15:15:36,133 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,135 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T15:15:36,136 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-10T15:15:36,136 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731251766136 2024-11-10T15:15:36,137 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-10T15:15:36,137 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-10T15:15:36,137 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-10T15:15:36,137 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-10T15:15:36,137 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-10T15:15:36,137 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:15:36,137 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-10T15:15:36,137 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-10T15:15:36,137 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
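[Annotation] The LogsCleaner chore above runs a chain of delegate cleaners (TimeToLiveLogCleaner, ReplicationLogCleaner, and so on) on a fixed 600000 ms period against the oldWALs archive. As a plain-Java analogy of that pattern, not the HBase classes themselves, a periodic task that deletes files older than a TTL might look like the following; the directory and TTL are arbitrary.

```java
import java.io.File;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class TtlCleanerSketch {
  public static void main(String[] args) {
    final File dir = new File("/tmp/oldWALs");        // hypothetical archive directory
    final long ttlMs = TimeUnit.MINUTES.toMillis(10); // delete files older than 10 minutes

    ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
    // Comparable to a chore with period=600000 ms: run the sweep every 10 minutes.
    pool.scheduleAtFixedRate(() -> {
      File[] files = dir.listFiles();
      if (files == null) {
        return;
      }
      long now = System.currentTimeMillis();
      for (File f : files) {
        if (f.isFile() && now - f.lastModified() > ttlMs) {
          // A real cleaner chore consults every delegate (TTL, replication, snapshots)
          // and only deletes a file when all of them agree it is no longer needed.
          f.delete();
        }
      }
    }, 0, 10, TimeUnit.MINUTES);
  }
}
```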
2024-11-10T15:15:36,138 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-10T15:15:36,138 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-10T15:15:36,138 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-10T15:15:36,139 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-10T15:15:36,139 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-10T15:15:36,139 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b1c88e26310d:0:becomeActiveMaster-HFileCleaner.large.0-1731251736139,5,FailOnTimeoutGroup] 2024-11-10T15:15:36,139 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b1c88e26310d:0:becomeActiveMaster-HFileCleaner.small.0-1731251736139,5,FailOnTimeoutGroup] 2024-11-10T15:15:36,139 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,139 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-10T15:15:36,139 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,139 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
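[Annotation] The HMaster line above states that reopening regions with a very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is given a value greater than 0; the property name comes straight from the log. A minimal sketch of enabling it (the threshold of 256 is an arbitrary example):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileRefCountSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Any value > 0 enables the recovery behaviour; 256 is an arbitrary example threshold.
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);
    System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
  }
}
```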
2024-11-10T15:15:36,143 INFO [RS:1;b1c88e26310d:36773 {}] regionserver.HRegionServer(746): ClusterId : d5a33c38-bcd5-484b-86de-3cd40690de8c 2024-11-10T15:15:36,143 DEBUG [RS:1;b1c88e26310d:36773 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T15:15:36,143 INFO [RS:0;b1c88e26310d:45831 {}] regionserver.HRegionServer(746): ClusterId : d5a33c38-bcd5-484b-86de-3cd40690de8c 2024-11-10T15:15:36,143 DEBUG [RS:0;b1c88e26310d:45831 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T15:15:36,145 INFO [RS:2;b1c88e26310d:36993 {}] regionserver.HRegionServer(746): ClusterId : d5a33c38-bcd5-484b-86de-3cd40690de8c 2024-11-10T15:15:36,146 DEBUG [RS:2;b1c88e26310d:36993 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T15:15:36,148 DEBUG [RS:1;b1c88e26310d:36773 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T15:15:36,148 DEBUG [RS:1;b1c88e26310d:36773 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T15:15:36,148 DEBUG [RS:0;b1c88e26310d:45831 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T15:15:36,148 DEBUG [RS:0;b1c88e26310d:45831 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T15:15:36,148 DEBUG [RS:2;b1c88e26310d:36993 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T15:15:36,149 DEBUG [RS:2;b1c88e26310d:36993 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T15:15:36,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741831_1007 (size=1321) 2024-11-10T15:15:36,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37183 is added to blk_1073741831_1007 (size=1321) 2024-11-10T15:15:36,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38487 is added to blk_1073741831_1007 (size=1321) 2024-11-10T15:15:36,152 DEBUG [RS:1;b1c88e26310d:36773 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T15:15:36,153 DEBUG [RS:1;b1c88e26310d:36773 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74f05241, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b1c88e26310d/172.17.0.2:0 2024-11-10T15:15:36,153 DEBUG [RS:0;b1c88e26310d:45831 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T15:15:36,153 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-10T15:15:36,153 DEBUG [RS:0;b1c88e26310d:45831 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@acf5b10, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b1c88e26310d/172.17.0.2:0 2024-11-10T15:15:36,153 INFO [PEWorker-1 {}] 
regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8 2024-11-10T15:15:36,156 DEBUG [RS:2;b1c88e26310d:36993 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T15:15:36,157 DEBUG [RS:2;b1c88e26310d:36993 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@fbf3607, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b1c88e26310d/172.17.0.2:0 2024-11-10T15:15:36,166 DEBUG [RS:0;b1c88e26310d:45831 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b1c88e26310d:45831 2024-11-10T15:15:36,166 INFO [RS:0;b1c88e26310d:45831 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T15:15:36,166 INFO [RS:0;b1c88e26310d:45831 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T15:15:36,166 DEBUG [RS:0;b1c88e26310d:45831 {}] regionserver.HRegionServer(832): About to register with Master. 
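[Annotation] The PEWorker above bootstraps the hbase:meta region with its info/ns/rep_barrier/table families. Once the cluster is serving, that catalog table is readable through the ordinary client API; a small sketch, assuming an hbase-site.xml pointing at a reachable cluster is on the classpath.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class MetaScanSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      try (ResultScanner scanner = meta.getScanner(new Scan())) {
        for (Result row : scanner) {
          // Each row key is a region name; the 'info' family carries the
          // regioninfo/server/state columns used for region assignment.
          System.out.println(row);
        }
      }
    }
  }
}
```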
2024-11-10T15:15:36,167 DEBUG [RS:1;b1c88e26310d:36773 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;b1c88e26310d:36773 2024-11-10T15:15:36,167 INFO [RS:1;b1c88e26310d:36773 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T15:15:36,167 INFO [RS:1;b1c88e26310d:36773 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T15:15:36,167 INFO [RS:0;b1c88e26310d:45831 {}] regionserver.HRegionServer(2659): reportForDuty to master=b1c88e26310d,33459,1731251735818 with port=45831, startcode=1731251735867 2024-11-10T15:15:36,167 DEBUG [RS:1;b1c88e26310d:36773 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-10T15:15:36,168 DEBUG [RS:0;b1c88e26310d:45831 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T15:15:36,168 DEBUG [RS:2;b1c88e26310d:36993 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;b1c88e26310d:36993 2024-11-10T15:15:36,168 INFO [RS:2;b1c88e26310d:36993 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T15:15:36,168 INFO [RS:2;b1c88e26310d:36993 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T15:15:36,168 DEBUG [RS:2;b1c88e26310d:36993 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-10T15:15:36,168 INFO [RS:1;b1c88e26310d:36773 {}] regionserver.HRegionServer(2659): reportForDuty to master=b1c88e26310d,33459,1731251735818 with port=36773, startcode=1731251735896 2024-11-10T15:15:36,168 DEBUG [RS:1;b1c88e26310d:36773 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T15:15:36,169 INFO [RS:2;b1c88e26310d:36993 {}] regionserver.HRegionServer(2659): reportForDuty to master=b1c88e26310d,33459,1731251735818 with port=36993, startcode=1731251735923 2024-11-10T15:15:36,169 DEBUG [RS:2;b1c88e26310d:36993 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T15:15:36,171 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40821, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T15:15:36,171 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33167, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T15:15:36,171 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33459 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b1c88e26310d,45831,1731251735867 2024-11-10T15:15:36,171 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43841, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T15:15:36,171 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33459 {}] master.ServerManager(517): Registering regionserver=b1c88e26310d,45831,1731251735867 2024-11-10T15:15:36,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38487 is added to blk_1073741832_1008 (size=32) 2024-11-10T15:15:36,174 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33459 {}] 
master.ServerManager(363): Checking decommissioned status of RegionServer b1c88e26310d,36773,1731251735896 2024-11-10T15:15:36,174 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33459 {}] master.ServerManager(517): Registering regionserver=b1c88e26310d,36773,1731251735896 2024-11-10T15:15:36,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741832_1008 (size=32) 2024-11-10T15:15:36,174 DEBUG [RS:0;b1c88e26310d:45831 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8 2024-11-10T15:15:36,174 DEBUG [RS:0;b1c88e26310d:45831 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42151 2024-11-10T15:15:36,174 DEBUG [RS:0;b1c88e26310d:45831 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T15:15:36,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37183 is added to blk_1073741832_1008 (size=32) 2024-11-10T15:15:36,175 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T15:15:36,176 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33459 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b1c88e26310d,36993,1731251735923 2024-11-10T15:15:36,176 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33459 {}] master.ServerManager(517): Registering regionserver=b1c88e26310d,36993,1731251735923 2024-11-10T15:15:36,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T15:15:36,177 DEBUG [RS:1;b1c88e26310d:36773 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8 2024-11-10T15:15:36,177 DEBUG [RS:1;b1c88e26310d:36773 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42151 2024-11-10T15:15:36,177 DEBUG [RS:1;b1c88e26310d:36773 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T15:15:36,177 DEBUG [RS:0;b1c88e26310d:45831 {}] zookeeper.ZKUtil(111): regionserver:45831-0x1010272f7850001, quorum=127.0.0.1:58757, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b1c88e26310d,45831,1731251735867 2024-11-10T15:15:36,177 WARN [RS:0;b1c88e26310d:45831 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
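[Annotation] The registration above pairs an RPC reportForDuty with a per-server znode under /hbase/rs that the master watches, which is why every registration is followed by a NodeChildrenChanged event on /hbase/rs. The pattern, reduced to the raw ZooKeeper client API as an illustration (quorum string and server name are placeholders, and /hbase/rs is assumed to already exist):

```java
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class RsRegistrationSketch {
  public static void main(String[] args) throws Exception {
    Watcher watcher = (WatchedEvent event) ->
        System.out.println("Received ZooKeeper Event, type=" + event.getType()
            + ", state=" + event.getState() + ", path=" + event.getPath());

    // Placeholder quorum; the test above uses 127.0.0.1:58757.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, watcher);

    // "Register" by creating an ephemeral child under /hbase/rs; it disappears
    // automatically when the session dies, which is what the master tracks.
    zk.create("/hbase/rs/host.example.com,16020,1731251735867", new byte[0],
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    // Watch the children of /hbase/rs: the registered default watcher fires
    // on the next NodeChildrenChanged, much like the master-side tracker.
    zk.getChildren("/hbase/rs", true);
    Thread.sleep(5_000);
    zk.close();
  }
}
```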
2024-11-10T15:15:36,178 INFO [RS:0;b1c88e26310d:45831 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T15:15:36,178 DEBUG [RS:0;b1c88e26310d:45831 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/WALs/b1c88e26310d,45831,1731251735867 2024-11-10T15:15:36,180 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T15:15:36,181 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b1c88e26310d,36773,1731251735896] 2024-11-10T15:15:36,181 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b1c88e26310d,45831,1731251735867] 2024-11-10T15:15:36,181 DEBUG [RS:2;b1c88e26310d:36993 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8 2024-11-10T15:15:36,181 DEBUG [RS:2;b1c88e26310d:36993 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42151 2024-11-10T15:15:36,181 DEBUG [RS:2;b1c88e26310d:36993 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T15:15:36,182 DEBUG [RS:1;b1c88e26310d:36773 {}] zookeeper.ZKUtil(111): regionserver:36773-0x1010272f7850002, quorum=127.0.0.1:58757, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b1c88e26310d,36773,1731251735896 2024-11-10T15:15:36,182 WARN [RS:1;b1c88e26310d:36773 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
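[Annotation] The WALFactory lines here and at the master (blocksize=256 MB, rollsize=128 MB, maxLogs=10) show the FSHLogProvider being instantiated for each server. The provider is configurable; a hedged sketch using hbase.wal.provider and hbase.regionserver.maxlogs, which are the keys documented in the HBase reference guide but should be treated as assumptions for this exact snapshot build.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalProviderSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // "filesystem" selects the classic FSHLog-based provider seen in this log;
    // "asyncfs" selects the asynchronous WAL implementation.
    conf.set("hbase.wal.provider", "filesystem");
    // Cap on the number of WAL files before flushes are forced (maxLogs=10 above).
    conf.setInt("hbase.regionserver.maxlogs", 10);
    System.out.println(conf.get("hbase.wal.provider"));
  }
}
```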
2024-11-10T15:15:36,182 INFO [RS:1;b1c88e26310d:36773 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T15:15:36,182 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T15:15:36,182 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:15:36,182 DEBUG [RS:1;b1c88e26310d:36773 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/WALs/b1c88e26310d,36773,1731251735896 2024-11-10T15:15:36,183 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:15:36,183 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T15:15:36,185 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T15:15:36,185 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:15:36,185 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:15:36,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T15:15:36,186 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, 
cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T15:15:36,186 DEBUG [RS:2;b1c88e26310d:36993 {}] zookeeper.ZKUtil(111): regionserver:36993-0x1010272f7850003, quorum=127.0.0.1:58757, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b1c88e26310d,36993,1731251735923 2024-11-10T15:15:36,186 WARN [RS:2;b1c88e26310d:36993 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-10T15:15:36,187 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b1c88e26310d,36993,1731251735923] 2024-11-10T15:15:36,187 INFO [RS:2;b1c88e26310d:36993 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T15:15:36,187 DEBUG [RS:2;b1c88e26310d:36993 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/WALs/b1c88e26310d,36993,1731251735923 2024-11-10T15:15:36,187 INFO [RS:0;b1c88e26310d:45831 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T15:15:36,187 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T15:15:36,187 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:15:36,188 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:15:36,188 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T15:15:36,190 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T15:15:36,190 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:15:36,190 INFO [RS:1;b1c88e26310d:36773 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T15:15:36,190 INFO [RS:0;b1c88e26310d:45831 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T15:15:36,190 INFO [RS:0;b1c88e26310d:45831 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T15:15:36,191 INFO [RS:0;b1c88e26310d:45831 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,191 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:15:36,191 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T15:15:36,191 INFO [RS:0;b1c88e26310d:45831 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T15:15:36,192 INFO [RS:2;b1c88e26310d:36993 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T15:15:36,192 INFO [RS:1;b1c88e26310d:36773 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T15:15:36,192 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/hbase/meta/1588230740 2024-11-10T15:15:36,192 INFO [RS:0;b1c88e26310d:45831 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T15:15:36,193 INFO [RS:0;b1c88e26310d:45831 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,193 INFO [RS:1;b1c88e26310d:36773 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T15:15:36,193 DEBUG [RS:0;b1c88e26310d:45831 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,193 INFO [RS:1;b1c88e26310d:36773 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
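Annotation: the MemStoreFlusher lines (globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M) are consistent with the usual sizing rule: limit = heap × global fraction, low mark = limit × lower-limit fraction. The sketch below just reproduces that arithmetic; the 0.4 and 0.95 fractions are the commonly documented defaults for hbase.regionserver.global.memstore.size and hbase.regionserver.global.memstore.size.lower.limit, and the ~2.2 GB heap is inferred from the 880 M figure rather than stated anywhere in this log.

public class MemStoreLimitSketch {
  public static void main(String[] args) {
    long maxHeapBytes = 2_200L * 1024 * 1024;  // assumed test JVM heap (~2.2 GB), inferred
    double globalFraction = 0.4;               // assumed hbase.regionserver.global.memstore.size
    double lowerLimitFraction = 0.95;          // assumed ...global.memstore.size.lower.limit

    long globalLimit = (long) (maxHeapBytes * globalFraction);
    long lowMark = (long) (globalLimit * lowerLimitFraction);

    System.out.printf("globalMemStoreLimit=%d M%n", globalLimit / (1024 * 1024));    // ~880 M
    System.out.printf("globalMemStoreLimitLowMark=%d M%n", lowMark / (1024 * 1024)); // ~836 M
  }
}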
2024-11-10T15:15:36,193 DEBUG [RS:0;b1c88e26310d:45831 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,193 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/hbase/meta/1588230740 2024-11-10T15:15:36,193 DEBUG [RS:0;b1c88e26310d:45831 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,193 DEBUG [RS:0;b1c88e26310d:45831 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,193 DEBUG [RS:0;b1c88e26310d:45831 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,193 INFO [RS:1;b1c88e26310d:36773 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T15:15:36,193 DEBUG [RS:0;b1c88e26310d:45831 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b1c88e26310d:0, corePoolSize=2, maxPoolSize=2 2024-11-10T15:15:36,193 DEBUG [RS:0;b1c88e26310d:45831 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,193 DEBUG [RS:0;b1c88e26310d:45831 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,194 DEBUG [RS:0;b1c88e26310d:45831 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,194 DEBUG [RS:0;b1c88e26310d:45831 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,194 DEBUG [RS:0;b1c88e26310d:45831 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,194 DEBUG [RS:0;b1c88e26310d:45831 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,194 DEBUG [RS:0;b1c88e26310d:45831 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b1c88e26310d:0, corePoolSize=3, maxPoolSize=3 2024-11-10T15:15:36,194 DEBUG [RS:0;b1c88e26310d:45831 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0, corePoolSize=3, maxPoolSize=3 2024-11-10T15:15:36,194 INFO [RS:0;b1c88e26310d:45831 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,194 INFO [RS:0;b1c88e26310d:45831 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,195 INFO [RS:0;b1c88e26310d:45831 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 
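Annotation: the RS_OPEN_REGION / RS_LOG_REPLAY_OPS / RS_SNAPSHOT_OPERATIONS lines above start fixed-size, named thread pools (corePoolSize equals maxPoolSize). The sketch below mirrors that shape with plain java.util.concurrent rather than HBase's internal ExecutorService wrapper; the pool names and sizes are copied from the log for illustration only.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class RsExecutorSketch {
  // Fixed-size pool with a readable thread name, mirroring corePoolSize=maxPoolSize above.
  static ThreadPoolExecutor namedPool(String name, int size) {
    AtomicInteger counter = new AtomicInteger();
    ThreadFactory tf = r -> new Thread(r, name + "-" + counter.incrementAndGet());
    return new ThreadPoolExecutor(size, size, 60, TimeUnit.SECONDS,
        new LinkedBlockingQueue<>(), tf);
  }

  public static void main(String[] args) {
    ThreadPoolExecutor openRegion = namedPool("RS_OPEN_REGION", 1);
    ThreadPoolExecutor logReplay  = namedPool("RS_LOG_REPLAY_OPS", 2);
    ThreadPoolExecutor snapshots  = namedPool("RS_SNAPSHOT_OPERATIONS", 3);

    openRegion.execute(() -> System.out.println("open region task"));

    openRegion.shutdown();
    logReplay.shutdown();
    snapshots.shutdown();
  }
}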
2024-11-10T15:15:36,195 INFO [RS:0;b1c88e26310d:45831 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,195 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T15:15:36,195 INFO [RS:0;b1c88e26310d:45831 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,195 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T15:15:36,195 INFO [RS:0;b1c88e26310d:45831 {}] hbase.ChoreService(168): Chore ScheduledChore name=b1c88e26310d,45831,1731251735867-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T15:15:36,195 INFO [RS:2;b1c88e26310d:36993 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T15:15:36,195 INFO [RS:1;b1c88e26310d:36773 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T15:15:36,195 INFO [RS:1;b1c88e26310d:36773 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,195 DEBUG [RS:1;b1c88e26310d:36773 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,195 DEBUG [RS:1;b1c88e26310d:36773 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,195 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
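Annotation: the FlushLargeStoresPolicy message above says that, with no per-family lower bound configured, the policy uses the region's memstore flush size divided by the number of families. For hbase:meta that is four families (info, ns, rep_barrier, table), and 128 MB is the usual hbase.hregion.memstore.flush.size default, which yields exactly the 32.0 M here and the flushSizeLowerBound=33554432 printed when the region opens a few lines below.

public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    long memstoreFlushSize = 128L * 1024 * 1024; // 134217728 bytes, assumed default flush size
    int families = 4;                            // info, ns, rep_barrier, table (from this log)
    long lowerBound = memstoreFlushSize / families;
    System.out.println("flushSizeLowerBound=" + lowerBound); // 33554432 = 32 MB
  }
}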
2024-11-10T15:15:36,195 DEBUG [RS:1;b1c88e26310d:36773 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,196 DEBUG [RS:1;b1c88e26310d:36773 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,196 DEBUG [RS:1;b1c88e26310d:36773 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,196 DEBUG [RS:1;b1c88e26310d:36773 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b1c88e26310d:0, corePoolSize=2, maxPoolSize=2 2024-11-10T15:15:36,196 DEBUG [RS:1;b1c88e26310d:36773 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,196 DEBUG [RS:1;b1c88e26310d:36773 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,196 DEBUG [RS:1;b1c88e26310d:36773 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,196 DEBUG [RS:1;b1c88e26310d:36773 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,196 DEBUG [RS:1;b1c88e26310d:36773 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,196 DEBUG [RS:1;b1c88e26310d:36773 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,196 DEBUG [RS:1;b1c88e26310d:36773 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b1c88e26310d:0, corePoolSize=3, maxPoolSize=3 2024-11-10T15:15:36,196 INFO [RS:2;b1c88e26310d:36993 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T15:15:36,196 INFO [RS:2;b1c88e26310d:36993 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,196 DEBUG [RS:1;b1c88e26310d:36773 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0, corePoolSize=3, maxPoolSize=3 2024-11-10T15:15:36,197 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T15:15:36,200 INFO [RS:2;b1c88e26310d:36993 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T15:15:36,201 INFO [RS:2;b1c88e26310d:36993 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T15:15:36,201 INFO [RS:2;b1c88e26310d:36993 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-10T15:15:36,201 DEBUG [RS:2;b1c88e26310d:36993 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,201 DEBUG [RS:2;b1c88e26310d:36993 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,202 INFO [RS:1;b1c88e26310d:36773 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,202 INFO [RS:1;b1c88e26310d:36773 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,202 INFO [RS:1;b1c88e26310d:36773 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,202 DEBUG [RS:2;b1c88e26310d:36993 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,202 INFO [RS:1;b1c88e26310d:36773 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,202 INFO [RS:1;b1c88e26310d:36773 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,202 DEBUG [RS:2;b1c88e26310d:36993 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,202 INFO [RS:1;b1c88e26310d:36773 {}] hbase.ChoreService(168): Chore ScheduledChore name=b1c88e26310d,36773,1731251735896-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
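Annotation: the ScheduledChore lines above register recurring tasks at fixed periods (CompactionChecker and MemstoreFlusherChore every 1 000 ms, ExecutorStatusChore every 60 s, nonceCleaner every 6 min, and so on). A minimal stand-in using ScheduledExecutorService rather than HBase's ChoreService; the periods are copied from the log and the task bodies are placeholders.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService chores = Executors.newScheduledThreadPool(2);

    // Periods copied from the ScheduledChore lines above.
    chores.scheduleAtFixedRate(() -> System.out.println("CompactionChecker tick"),
        1_000, 1_000, TimeUnit.MILLISECONDS);
    chores.scheduleAtFixedRate(() -> System.out.println("ExecutorStatusChore tick"),
        60_000, 60_000, TimeUnit.MILLISECONDS);

    TimeUnit.SECONDS.sleep(3);   // let a couple of ticks run, then stop
    chores.shutdownNow();
  }
}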
2024-11-10T15:15:36,202 DEBUG [RS:2;b1c88e26310d:36993 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,202 DEBUG [RS:2;b1c88e26310d:36993 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b1c88e26310d:0, corePoolSize=2, maxPoolSize=2 2024-11-10T15:15:36,202 DEBUG [RS:2;b1c88e26310d:36993 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,202 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T15:15:36,203 DEBUG [RS:2;b1c88e26310d:36993 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,203 DEBUG [RS:2;b1c88e26310d:36993 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,203 DEBUG [RS:2;b1c88e26310d:36993 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,203 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70748512, jitterRate=0.054234981536865234}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T15:15:36,203 DEBUG [RS:2;b1c88e26310d:36993 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,203 DEBUG [RS:2;b1c88e26310d:36993 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b1c88e26310d:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:15:36,203 DEBUG [RS:2;b1c88e26310d:36993 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b1c88e26310d:0, corePoolSize=3, maxPoolSize=3 2024-11-10T15:15:36,203 DEBUG [RS:2;b1c88e26310d:36993 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0, corePoolSize=3, maxPoolSize=3 2024-11-10T15:15:36,204 INFO [RS:2;b1c88e26310d:36993 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,204 INFO [RS:2;b1c88e26310d:36993 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,204 INFO [RS:2;b1c88e26310d:36993 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,204 INFO [RS:2;b1c88e26310d:36993 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,204 INFO [RS:2;b1c88e26310d:36993 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 
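Annotation: the two "Opened 1588230740" lines (the one just above and the later re-open on the assigned region server) print desiredMaxFileSize values that are a 64 MB (67 108 864-byte) base scaled by the logged jitterRate, i.e. desiredMaxFileSize = base × (1 + jitterRate). The quick check below treats the 64 MB base as an observation recovered from these two numbers, not as a documented constant.

public class SplitSizeJitterSketch {
  public static void main(String[] args) {
    long base = 67_108_864L;                   // 64 MB, inferred from the two log lines
    double jitter1 = 0.054234981536865234;     // jitterRate in the open above
    double jitter2 = 0.06110265851020813;      // jitterRate in the later re-open
    System.out.println(Math.round(base * (1 + jitter1))); // 70748512, as logged above
    System.out.println(Math.round(base * (1 + jitter2))); // 71209394, as logged later
  }
}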
2024-11-10T15:15:36,204 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731251736176Initializing all the Stores at 1731251736177 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731251736177Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731251736180 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731251736180Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731251736180Cleaning up temporary data from old regions at 1731251736195 (+15 ms)Region opened successfully at 1731251736204 (+9 ms) 2024-11-10T15:15:36,204 INFO [RS:2;b1c88e26310d:36993 {}] hbase.ChoreService(168): Chore ScheduledChore name=b1c88e26310d,36993,1731251735923-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
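Annotation: the region open journal above spells out the hbase:meta column family attributes (VERSIONS, ROWCOL bloom filter, IN_MEMORY, ROW_INDEX_V1 encoding, 8 KB block size, and so on). Meta's descriptor is built internally, but the same attributes can be expressed with the public client-side builder; the sketch below is only that client-side analogue, with the values copied from the 'info' family block above.

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaInfoFamilySketch {
  public static void main(String[] args) {
    // Attribute values copied from the {NAME => 'info', ...} block in the open journal.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8192)
        .build();
    System.out.println(info);
  }
}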
2024-11-10T15:15:36,204 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T15:15:36,204 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T15:15:36,204 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T15:15:36,204 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T15:15:36,204 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T15:15:36,208 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T15:15:36,208 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731251736204Disabling compacts and flushes for region at 1731251736204Disabling writes for close at 1731251736204Writing region close event to WAL at 1731251736207 (+3 ms)Closed at 1731251736208 (+1 ms) 2024-11-10T15:15:36,210 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T15:15:36,210 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-10T15:15:36,210 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-10T15:15:36,212 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T15:15:36,214 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-10T15:15:36,215 INFO [RS:0;b1c88e26310d:45831 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T15:15:36,215 INFO [RS:0;b1c88e26310d:45831 {}] hbase.ChoreService(168): Chore ScheduledChore name=b1c88e26310d,45831,1731251735867-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,216 INFO [RS:0;b1c88e26310d:45831 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,216 INFO [RS:0;b1c88e26310d:45831 {}] regionserver.Replication(171): b1c88e26310d,45831,1731251735867 started 2024-11-10T15:15:36,219 INFO [RS:1;b1c88e26310d:36773 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T15:15:36,219 INFO [RS:1;b1c88e26310d:36773 {}] hbase.ChoreService(168): Chore ScheduledChore name=b1c88e26310d,36773,1731251735896-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,219 INFO [RS:1;b1c88e26310d:36773 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-10T15:15:36,220 INFO [RS:1;b1c88e26310d:36773 {}] regionserver.Replication(171): b1c88e26310d,36773,1731251735896 started 2024-11-10T15:15:36,225 INFO [RS:2;b1c88e26310d:36993 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T15:15:36,225 INFO [RS:2;b1c88e26310d:36993 {}] hbase.ChoreService(168): Chore ScheduledChore name=b1c88e26310d,36993,1731251735923-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,226 INFO [RS:2;b1c88e26310d:36993 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,226 INFO [RS:2;b1c88e26310d:36993 {}] regionserver.Replication(171): b1c88e26310d,36993,1731251735923 started 2024-11-10T15:15:36,235 INFO [RS:1;b1c88e26310d:36773 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,235 INFO [RS:1;b1c88e26310d:36773 {}] regionserver.HRegionServer(1482): Serving as b1c88e26310d,36773,1731251735896, RpcServer on b1c88e26310d/172.17.0.2:36773, sessionid=0x1010272f7850002 2024-11-10T15:15:36,235 DEBUG [RS:1;b1c88e26310d:36773 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T15:15:36,235 DEBUG [RS:1;b1c88e26310d:36773 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b1c88e26310d,36773,1731251735896 2024-11-10T15:15:36,235 DEBUG [RS:1;b1c88e26310d:36773 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b1c88e26310d,36773,1731251735896' 2024-11-10T15:15:36,235 DEBUG [RS:1;b1c88e26310d:36773 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T15:15:36,236 DEBUG [RS:1;b1c88e26310d:36773 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T15:15:36,237 DEBUG [RS:1;b1c88e26310d:36773 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T15:15:36,237 DEBUG [RS:1;b1c88e26310d:36773 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T15:15:36,237 DEBUG [RS:1;b1c88e26310d:36773 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b1c88e26310d,36773,1731251735896 2024-11-10T15:15:36,237 DEBUG [RS:1;b1c88e26310d:36773 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b1c88e26310d,36773,1731251735896' 2024-11-10T15:15:36,237 DEBUG [RS:1;b1c88e26310d:36773 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T15:15:36,237 DEBUG [RS:1;b1c88e26310d:36773 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T15:15:36,237 INFO [RS:0;b1c88e26310d:45831 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-10T15:15:36,238 DEBUG [RS:1;b1c88e26310d:36773 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T15:15:36,238 INFO [RS:0;b1c88e26310d:45831 {}] regionserver.HRegionServer(1482): Serving as b1c88e26310d,45831,1731251735867, RpcServer on b1c88e26310d/172.17.0.2:45831, sessionid=0x1010272f7850001 2024-11-10T15:15:36,238 INFO [RS:1;b1c88e26310d:36773 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T15:15:36,238 INFO [RS:1;b1c88e26310d:36773 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-10T15:15:36,238 DEBUG [RS:0;b1c88e26310d:45831 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T15:15:36,238 DEBUG [RS:0;b1c88e26310d:45831 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b1c88e26310d,45831,1731251735867 2024-11-10T15:15:36,238 DEBUG [RS:0;b1c88e26310d:45831 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b1c88e26310d,45831,1731251735867' 2024-11-10T15:15:36,238 DEBUG [RS:0;b1c88e26310d:45831 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T15:15:36,239 DEBUG [RS:0;b1c88e26310d:45831 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T15:15:36,239 DEBUG [RS:0;b1c88e26310d:45831 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T15:15:36,239 DEBUG [RS:0;b1c88e26310d:45831 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T15:15:36,239 DEBUG [RS:0;b1c88e26310d:45831 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b1c88e26310d,45831,1731251735867 2024-11-10T15:15:36,239 DEBUG [RS:0;b1c88e26310d:45831 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b1c88e26310d,45831,1731251735867' 2024-11-10T15:15:36,239 DEBUG [RS:0;b1c88e26310d:45831 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T15:15:36,240 DEBUG [RS:0;b1c88e26310d:45831 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T15:15:36,240 DEBUG [RS:0;b1c88e26310d:45831 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T15:15:36,240 INFO [RS:0;b1c88e26310d:45831 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T15:15:36,241 INFO [RS:0;b1c88e26310d:45831 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-10T15:15:36,244 INFO [RS:2;b1c88e26310d:36993 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-10T15:15:36,245 INFO [RS:2;b1c88e26310d:36993 {}] regionserver.HRegionServer(1482): Serving as b1c88e26310d,36993,1731251735923, RpcServer on b1c88e26310d/172.17.0.2:36993, sessionid=0x1010272f7850003 2024-11-10T15:15:36,245 DEBUG [RS:2;b1c88e26310d:36993 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T15:15:36,245 DEBUG [RS:2;b1c88e26310d:36993 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b1c88e26310d,36993,1731251735923 2024-11-10T15:15:36,245 DEBUG [RS:2;b1c88e26310d:36993 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b1c88e26310d,36993,1731251735923' 2024-11-10T15:15:36,245 DEBUG [RS:2;b1c88e26310d:36993 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T15:15:36,245 DEBUG [RS:2;b1c88e26310d:36993 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T15:15:36,246 DEBUG [RS:2;b1c88e26310d:36993 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T15:15:36,246 DEBUG [RS:2;b1c88e26310d:36993 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T15:15:36,246 DEBUG [RS:2;b1c88e26310d:36993 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b1c88e26310d,36993,1731251735923 2024-11-10T15:15:36,246 DEBUG [RS:2;b1c88e26310d:36993 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b1c88e26310d,36993,1731251735923' 2024-11-10T15:15:36,246 DEBUG [RS:2;b1c88e26310d:36993 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T15:15:36,246 DEBUG [RS:2;b1c88e26310d:36993 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T15:15:36,247 DEBUG [RS:2;b1c88e26310d:36993 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T15:15:36,247 INFO [RS:2;b1c88e26310d:36993 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T15:15:36,247 INFO [RS:2;b1c88e26310d:36993 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
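Annotation: at this point all three region servers report "Serving as ..., RpcServer on ...". From a client's perspective the same set is visible through the Admin API; the sketch below is a hedged example with the standard HBase 2.x client, using the ZooKeeper quorum address from this log as a placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListRegionServersSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "58757"); // quorum port from this log

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Live servers as seen by the master; should list the three servers registered above.
      for (ServerName sn : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
        System.out.println("live region server: " + sn);
      }
    }
  }
}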
2024-11-10T15:15:36,341 INFO [RS:1;b1c88e26310d:36773 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b1c88e26310d%2C36773%2C1731251735896, suffix=, logDir=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/WALs/b1c88e26310d,36773,1731251735896, archiveDir=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/oldWALs, maxLogs=32 2024-11-10T15:15:36,343 INFO [RS:1;b1c88e26310d:36773 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b1c88e26310d%2C36773%2C1731251735896.1731251736343 2024-11-10T15:15:36,343 INFO [RS:0;b1c88e26310d:45831 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b1c88e26310d%2C45831%2C1731251735867, suffix=, logDir=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/WALs/b1c88e26310d,45831,1731251735867, archiveDir=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/oldWALs, maxLogs=32 2024-11-10T15:15:36,344 INFO [RS:0;b1c88e26310d:45831 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b1c88e26310d%2C45831%2C1731251735867.1731251736344 2024-11-10T15:15:36,349 INFO [RS:2;b1c88e26310d:36993 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b1c88e26310d%2C36993%2C1731251735923, suffix=, logDir=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/WALs/b1c88e26310d,36993,1731251735923, archiveDir=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/oldWALs, maxLogs=32 2024-11-10T15:15:36,350 INFO [RS:2;b1c88e26310d:36993 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b1c88e26310d%2C36993%2C1731251735923.1731251736350 2024-11-10T15:15:36,353 INFO [RS:1;b1c88e26310d:36773 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/WALs/b1c88e26310d,36773,1731251735896/b1c88e26310d%2C36773%2C1731251735896.1731251736343 2024-11-10T15:15:36,358 INFO [RS:0;b1c88e26310d:45831 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/WALs/b1c88e26310d,45831,1731251735867/b1c88e26310d%2C45831%2C1731251735867.1731251736344 2024-11-10T15:15:36,364 WARN [b1c88e26310d:33459 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
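Annotation: the "WAL configuration: blocksize=256 MB, rollsize=128 MB, ... maxLogs=32" lines above show the roll size as the block size scaled by a multiplier (0.5 in this run, taken as the observed rollsize/blocksize ratio rather than a claimed default). The property names in the comments (hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier, hbase.regionserver.maxlogs) are the commonly documented ones and are assumptions to verify against the HBase version in use.

public class WalRollSizeSketch {
  public static void main(String[] args) {
    long blockSize = 256L * 1024 * 1024;  // 256 MB, as logged (hbase.regionserver.hlog.blocksize)
    double rollMultiplier = 0.5;          // rollsize / blocksize observed in this log
    long rollSize = (long) (blockSize * rollMultiplier);
    System.out.println("rollsize bytes = " + rollSize); // 134217728 = 128 MB, as logged
  }
}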
2024-11-10T15:15:36,365 DEBUG [RS:1;b1c88e26310d:36773 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35261:35261),(127.0.0.1/127.0.0.1:46061:46061),(127.0.0.1/127.0.0.1:46067:46067)] 2024-11-10T15:15:36,366 DEBUG [RS:0;b1c88e26310d:45831 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35261:35261),(127.0.0.1/127.0.0.1:46061:46061),(127.0.0.1/127.0.0.1:46067:46067)] 2024-11-10T15:15:36,370 INFO [RS:2;b1c88e26310d:36993 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/WALs/b1c88e26310d,36993,1731251735923/b1c88e26310d%2C36993%2C1731251735923.1731251736350 2024-11-10T15:15:36,374 DEBUG [RS:2;b1c88e26310d:36993 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35261:35261),(127.0.0.1/127.0.0.1:46061:46061),(127.0.0.1/127.0.0.1:46067:46067)] 2024-11-10T15:15:36,615 DEBUG [b1c88e26310d:33459 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-10T15:15:36,615 DEBUG [b1c88e26310d:33459 {}] balancer.BalancerClusterState(204): Hosts are {b1c88e26310d=0} racks are {/default-rack=0} 2024-11-10T15:15:36,617 DEBUG [b1c88e26310d:33459 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-10T15:15:36,617 DEBUG [b1c88e26310d:33459 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-10T15:15:36,617 DEBUG [b1c88e26310d:33459 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-10T15:15:36,617 DEBUG [b1c88e26310d:33459 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-10T15:15:36,617 DEBUG [b1c88e26310d:33459 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-10T15:15:36,618 DEBUG [b1c88e26310d:33459 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-10T15:15:36,618 INFO [b1c88e26310d:33459 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-10T15:15:36,618 INFO [b1c88e26310d:33459 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-10T15:15:36,618 INFO [b1c88e26310d:33459 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-10T15:15:36,618 DEBUG [b1c88e26310d:33459 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-10T15:15:36,618 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b1c88e26310d,36993,1731251735923 2024-11-10T15:15:36,620 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b1c88e26310d,36993,1731251735923, state=OPENING 2024-11-10T15:15:36,622 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-10T15:15:36,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45831-0x1010272f7850001, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:36,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36993-0x1010272f7850003, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:36,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36773-0x1010272f7850002, quorum=127.0.0.1:58757, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:36,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:36,624 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:15:36,624 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:15:36,624 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:15:36,624 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T15:15:36,624 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:15:36,625 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=b1c88e26310d,36993,1731251735923}] 2024-11-10T15:15:36,779 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-10T15:15:36,781 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54905, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-10T15:15:36,786 INFO [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-10T15:15:36,787 INFO [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T15:15:36,789 INFO [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b1c88e26310d%2C36993%2C1731251735923.meta, suffix=.meta, logDir=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/WALs/b1c88e26310d,36993,1731251735923, archiveDir=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/oldWALs, maxLogs=32 2024-11-10T15:15:36,790 INFO [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor b1c88e26310d%2C36993%2C1731251735923.meta.1731251736789.meta 2024-11-10T15:15:36,797 INFO [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/WALs/b1c88e26310d,36993,1731251735923/b1c88e26310d%2C36993%2C1731251735923.meta.1731251736789.meta 2024-11-10T15:15:36,798 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:35261:35261),(127.0.0.1/127.0.0.1:46067:46067),(127.0.0.1/127.0.0.1:46061:46061)] 2024-11-10T15:15:36,799 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-10T15:15:36,799 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-10T15:15:36,799 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-10T15:15:36,799 INFO [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-10T15:15:36,799 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-10T15:15:36,799 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T15:15:36,800 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-10T15:15:36,800 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-10T15:15:36,801 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T15:15:36,803 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T15:15:36,803 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:15:36,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:15:36,803 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T15:15:36,804 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T15:15:36,804 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:15:36,805 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:15:36,805 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T15:15:36,806 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T15:15:36,806 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:15:36,806 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:15:36,806 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T15:15:36,807 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T15:15:36,807 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:15:36,808 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:15:36,808 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T15:15:36,809 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/hbase/meta/1588230740 2024-11-10T15:15:36,810 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/hbase/meta/1588230740 2024-11-10T15:15:36,812 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T15:15:36,812 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T15:15:36,813 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
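Annotation: once this RS-side open finishes and the meta location set in ZooKeeper above is flipped to OPEN (a few lines below), clients resolve hbase:meta through that published location. The sketch below does that lookup with the public RegionLocator API; it is an illustrative client-side check, with the quorum address taken from this log as a placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "58757"); // quorum port from this log

    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator meta = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      HRegionLocation loc = meta.getRegionLocation(HConstants.EMPTY_START_ROW);
      // Expected to point at the server the OpenRegionProcedure assigned meta to.
      System.out.println("hbase:meta is on " + loc.getServerName());
    }
  }
}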
2024-11-10T15:15:36,814 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T15:15:36,815 INFO [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71209394, jitterRate=0.06110265851020813}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T15:15:36,816 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-10T15:15:36,817 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731251736800Writing region info on filesystem at 1731251736800Initializing all the Stores at 1731251736801 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731251736801Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731251736801Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731251736801Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731251736801Cleaning up temporary data from old regions at 1731251736812 (+11 ms)Running coprocessor post-open hooks at 1731251736816 (+4 ms)Region opened successfully at 1731251736816 2024-11-10T15:15:36,818 INFO [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731251736779 2024-11-10T15:15:36,822 DEBUG [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-10T15:15:36,822 INFO [RS_OPEN_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-10T15:15:36,823 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=b1c88e26310d,36993,1731251735923 2024-11-10T15:15:36,824 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b1c88e26310d,36993,1731251735923, state=OPEN 2024-11-10T15:15:36,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T15:15:36,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36773-0x1010272f7850002, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T15:15:36,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45831-0x1010272f7850001, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T15:15:36,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36993-0x1010272f7850003, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T15:15:36,826 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:15:36,826 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:15:36,826 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:15:36,826 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:15:36,826 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=b1c88e26310d,36993,1731251735923 2024-11-10T15:15:36,831 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-10T15:15:36,831 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=b1c88e26310d,36993,1731251735923 in 203 msec 2024-11-10T15:15:36,836 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-10T15:15:36,836 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 621 msec 2024-11-10T15:15:36,837 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T15:15:36,837 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-10T15:15:36,838 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T15:15:36,839 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=b1c88e26310d,36993,1731251735923, seqNum=-1] 2024-11-10T15:15:36,839 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T15:15:36,841 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35605, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T15:15:36,849 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 719 msec 2024-11-10T15:15:36,849 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731251736849, completionTime=-1 2024-11-10T15:15:36,849 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-10T15:15:36,849 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-10T15:15:36,851 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-10T15:15:36,852 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731251796851 2024-11-10T15:15:36,852 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731251856852 2024-11-10T15:15:36,852 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-10T15:15:36,852 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b1c88e26310d,33459,1731251735818-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,852 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b1c88e26310d,33459,1731251735818-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,852 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b1c88e26310d,33459,1731251735818-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,852 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b1c88e26310d:33459, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,853 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-10T15:15:36,853 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-11-10T15:15:36,855 DEBUG [master/b1c88e26310d:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-10T15:15:36,857 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.893sec 2024-11-10T15:15:36,858 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-10T15:15:36,858 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-10T15:15:36,858 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-10T15:15:36,858 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-10T15:15:36,858 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-10T15:15:36,858 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b1c88e26310d,33459,1731251735818-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T15:15:36,858 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b1c88e26310d,33459,1731251735818-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-10T15:15:36,861 DEBUG [master/b1c88e26310d:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-10T15:15:36,861 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-10T15:15:36,861 INFO [master/b1c88e26310d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b1c88e26310d,33459,1731251735818-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
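Everything up to this point is the mini cluster coming up: hbase:meta is opened and assigned, the three region servers report in, and the master completes initialization in about 0.9 s and enables its chores. A rough sketch of the test scaffolding that drives this phase is below, assuming HBaseTestingUtil's startMiniCluster(int) overload (the class and its shutdownMiniCluster counterpart do appear in the teardown stack traces near the end of this log; the exact overload and the wait-on-regionservers property are assumptions). The matching shutdown call is sketched after the teardown lines at the end of this section.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterStartSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();

        // Configuration tweaks (for example the flush lower bound sketched
        // earlier) would go here, before the cluster starts.
        Configuration conf = util.getConfiguration();
        // Assumed property name; the log shows the master waiting for exactly
        // three region servers ("expected min=3 server(s), max=3 server(s)").
        conf.setInt("hbase.master.wait.on.regionservers.mintostart", 3);

        // One master plus three region servers, matching the RS ports
        // 45831, 36773 and 36993 seen throughout this log.
        util.startMiniCluster(3);
      }
    }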
2024-11-10T15:15:36,943 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5738ac9e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T15:15:36,943 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request b1c88e26310d,33459,-1 for getting cluster id 2024-11-10T15:15:36,943 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-10T15:15:36,945 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd5a33c38-bcd5-484b-86de-3cd40690de8c' 2024-11-10T15:15:36,945 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-10T15:15:36,946 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d5a33c38-bcd5-484b-86de-3cd40690de8c" 2024-11-10T15:15:36,946 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71b8a392, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T15:15:36,946 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b1c88e26310d,33459,-1] 2024-11-10T15:15:36,946 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-10T15:15:36,947 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:15:36,948 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40352, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-10T15:15:36,949 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55653567, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T15:15:36,950 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T15:15:36,951 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b1c88e26310d,36993,1731251735923, seqNum=-1] 2024-11-10T15:15:36,952 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T15:15:36,953 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36292, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T15:15:36,956 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=b1c88e26310d,33459,1731251735818 2024-11-10T15:15:36,957 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-10T15:15:36,958 DEBUG 
[RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is b1c88e26310d,33459,1731251735818 2024-11-10T15:15:36,958 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5a5d0187 2024-11-10T15:15:36,958 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-10T15:15:36,960 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40368, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-10T15:15:36,961 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T15:15:36,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-10T15:15:36,965 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-10T15:15:36,965 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:15:36,965 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-10T15:15:36,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T15:15:36,967 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-10T15:15:36,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38487 is added to blk_1073741837_1013 (size=392) 2024-11-10T15:15:36,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741837_1013 (size=392) 2024-11-10T15:15:36,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37183 is added to blk_1073741837_1013 (size=392) 2024-11-10T15:15:36,980 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => da856137927a0d698f060b424af84e67, NAME => 'TestHBaseWalOnEC,,1731251736961.da856137927a0d698f060b424af84e67.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8 2024-11-10T15:15:36,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38487 is added to blk_1073741838_1014 (size=51) 2024-11-10T15:15:36,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37183 is added to blk_1073741838_1014 (size=51) 2024-11-10T15:15:36,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741838_1014 (size=51) 2024-11-10T15:15:36,992 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731251736961.da856137927a0d698f060b424af84e67.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T15:15:36,992 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing da856137927a0d698f060b424af84e67, disabling compactions & flushes 2024-11-10T15:15:36,992 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731251736961.da856137927a0d698f060b424af84e67. 2024-11-10T15:15:36,992 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731251736961.da856137927a0d698f060b424af84e67. 2024-11-10T15:15:36,992 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731251736961.da856137927a0d698f060b424af84e67. after waiting 0 ms 2024-11-10T15:15:36,992 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731251736961.da856137927a0d698f060b424af84e67. 2024-11-10T15:15:36,992 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731251736961.da856137927a0d698f060b424af84e67. 2024-11-10T15:15:36,993 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for da856137927a0d698f060b424af84e67: Waiting for close lock at 1731251736992Disabling compacts and flushes for region at 1731251736992Disabling writes for close at 1731251736992Writing region close event to WAL at 1731251736992Closed at 1731251736992 2024-11-10T15:15:36,994 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-10T15:15:36,995 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731251736961.da856137927a0d698f060b424af84e67.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731251736994"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731251736994"}]},"ts":"1731251736994"} 2024-11-10T15:15:36,998 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
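The create-table request logged above ('TestHBaseWalOnEC' with REGION_REPLICATION => '1' and a single 'cf' family) runs server-side as CreateTableProcedure pid=4. A minimal client-side sketch of issuing the same request through the Admin API follows; the table and family names and the region replication come from the log, while the connection setup (hbase-site.xml on the classpath) is an assumption.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(); // assumes hbase-site.xml is available
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
              .setRegionReplication(1)                                 // REGION_REPLICATION => '1' in the log
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")) // single 'cf' family, default attributes
              .build();
          // Stored and executed as the CreateTableProcedure (pid=4) logged above.
          admin.createTable(desc);
        }
      }
    }

The call returns once the procedure completes; the client visible in this log polls MasterRpcServices ("Checking to see if procedure is done pid=4") until the procedure reaches SUCCESS.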
2024-11-10T15:15:36,999 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-10T15:15:37,000 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731251736999"}]},"ts":"1731251736999"} 2024-11-10T15:15:37,002 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-10T15:15:37,003 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {b1c88e26310d=0} racks are {/default-rack=0} 2024-11-10T15:15:37,004 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-10T15:15:37,004 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-10T15:15:37,004 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-10T15:15:37,004 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-10T15:15:37,004 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-10T15:15:37,004 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-10T15:15:37,004 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-10T15:15:37,004 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-10T15:15:37,004 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-10T15:15:37,004 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-10T15:15:37,004 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=da856137927a0d698f060b424af84e67, ASSIGN}] 2024-11-10T15:15:37,006 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=da856137927a0d698f060b424af84e67, ASSIGN 2024-11-10T15:15:37,008 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=da856137927a0d698f060b424af84e67, ASSIGN; state=OFFLINE, location=b1c88e26310d,45831,1731251735867; forceNewPlan=false, retain=false 2024-11-10T15:15:37,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T15:15:37,159 INFO [b1c88e26310d:33459 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-10T15:15:37,159 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=da856137927a0d698f060b424af84e67, regionState=OPENING, regionLocation=b1c88e26310d,45831,1731251735867 2024-11-10T15:15:37,163 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=da856137927a0d698f060b424af84e67, ASSIGN because future has completed 2024-11-10T15:15:37,164 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure da856137927a0d698f060b424af84e67, server=b1c88e26310d,45831,1731251735867}] 2024-11-10T15:15:37,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T15:15:37,318 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-10T15:15:37,320 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37005, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-10T15:15:37,325 INFO [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731251736961.da856137927a0d698f060b424af84e67. 2024-11-10T15:15:37,325 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => da856137927a0d698f060b424af84e67, NAME => 'TestHBaseWalOnEC,,1731251736961.da856137927a0d698f060b424af84e67.', STARTKEY => '', ENDKEY => ''} 2024-11-10T15:15:37,325 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC da856137927a0d698f060b424af84e67 2024-11-10T15:15:37,326 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731251736961.da856137927a0d698f060b424af84e67.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T15:15:37,326 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for da856137927a0d698f060b424af84e67 2024-11-10T15:15:37,326 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for da856137927a0d698f060b424af84e67 2024-11-10T15:15:37,328 INFO [StoreOpener-da856137927a0d698f060b424af84e67-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region da856137927a0d698f060b424af84e67 2024-11-10T15:15:37,329 INFO [StoreOpener-da856137927a0d698f060b424af84e67-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region da856137927a0d698f060b424af84e67 columnFamilyName cf 2024-11-10T15:15:37,329 DEBUG [StoreOpener-da856137927a0d698f060b424af84e67-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:15:37,330 INFO [StoreOpener-da856137927a0d698f060b424af84e67-1 {}] regionserver.HStore(327): Store=da856137927a0d698f060b424af84e67/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T15:15:37,330 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for da856137927a0d698f060b424af84e67 2024-11-10T15:15:37,331 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/default/TestHBaseWalOnEC/da856137927a0d698f060b424af84e67 2024-11-10T15:15:37,331 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/default/TestHBaseWalOnEC/da856137927a0d698f060b424af84e67 2024-11-10T15:15:37,332 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for da856137927a0d698f060b424af84e67 2024-11-10T15:15:37,332 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for da856137927a0d698f060b424af84e67 2024-11-10T15:15:37,334 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for da856137927a0d698f060b424af84e67 2024-11-10T15:15:37,336 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/default/TestHBaseWalOnEC/da856137927a0d698f060b424af84e67/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T15:15:37,337 INFO [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened da856137927a0d698f060b424af84e67; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68268387, jitterRate=0.017278239130973816}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-10T15:15:37,337 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for da856137927a0d698f060b424af84e67 2024-11-10T15:15:37,338 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for da856137927a0d698f060b424af84e67: Running coprocessor pre-open hook at 1731251737326Writing region info on filesystem at 1731251737326Initializing all the Stores at 1731251737327 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731251737327Cleaning up temporary data from old regions at 1731251737332 (+5 ms)Running coprocessor post-open hooks at 1731251737337 (+5 ms)Region opened successfully at 1731251737338 (+1 ms) 2024-11-10T15:15:37,339 INFO [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731251736961.da856137927a0d698f060b424af84e67., pid=6, masterSystemTime=1731251737318 2024-11-10T15:15:37,342 DEBUG [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731251736961.da856137927a0d698f060b424af84e67. 2024-11-10T15:15:37,342 INFO [RS_OPEN_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731251736961.da856137927a0d698f060b424af84e67. 2024-11-10T15:15:37,343 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=da856137927a0d698f060b424af84e67, regionState=OPEN, openSeqNum=2, regionLocation=b1c88e26310d,45831,1731251735867 2024-11-10T15:15:37,347 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure da856137927a0d698f060b424af84e67, server=b1c88e26310d,45831,1731251735867 because future has completed 2024-11-10T15:15:37,352 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-10T15:15:37,352 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure da856137927a0d698f060b424af84e67, server=b1c88e26310d,45831,1731251735867 in 185 msec 2024-11-10T15:15:37,356 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-10T15:15:37,356 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=da856137927a0d698f060b424af84e67, ASSIGN in 348 msec 2024-11-10T15:15:37,357 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-10T15:15:37,358 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731251737357"}]},"ts":"1731251737357"} 2024-11-10T15:15:37,361 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-10T15:15:37,362 INFO [PEWorker-1 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-10T15:15:37,365 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 401 msec 2024-11-10T15:15:37,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T15:15:37,597 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-10T15:15:37,597 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-10T15:15:37,597 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-10T15:15:37,604 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-10T15:15:37,605 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-10T15:15:37,605 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-11-10T15:15:37,609 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731251736961.da856137927a0d698f060b424af84e67., hostname=b1c88e26310d,45831,1731251735867, seqNum=2] 2024-11-10T15:15:37,609 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T15:15:37,611 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41190, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T15:15:37,615 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-11-10T15:15:37,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-10T15:15:37,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-10T15:15:37,618 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-10T15:15:37,619 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-10T15:15:37,619 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-10T15:15:37,726 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-10T15:15:37,774 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45831 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-10T15:15:37,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731251736961.da856137927a0d698f060b424af84e67. 2024-11-10T15:15:37,775 INFO [RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing da856137927a0d698f060b424af84e67 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-10T15:15:37,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/default/TestHBaseWalOnEC/da856137927a0d698f060b424af84e67/.tmp/cf/c1518f55a6a84872b81fcc5dd64387a6 is 36, key is row/cf:cq/1731251737612/Put/seqid=0 2024-11-10T15:15:37,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741839_1015 (size=4787) 2024-11-10T15:15:37,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38487 is added to blk_1073741839_1015 (size=4787) 2024-11-10T15:15:37,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37183 is added to blk_1073741839_1015 (size=4787) 2024-11-10T15:15:37,801 INFO [RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/default/TestHBaseWalOnEC/da856137927a0d698f060b424af84e67/.tmp/cf/c1518f55a6a84872b81fcc5dd64387a6 2024-11-10T15:15:37,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/default/TestHBaseWalOnEC/da856137927a0d698f060b424af84e67/.tmp/cf/c1518f55a6a84872b81fcc5dd64387a6 as hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/default/TestHBaseWalOnEC/da856137927a0d698f060b424af84e67/cf/c1518f55a6a84872b81fcc5dd64387a6 2024-11-10T15:15:37,819 INFO [RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/default/TestHBaseWalOnEC/da856137927a0d698f060b424af84e67/cf/c1518f55a6a84872b81fcc5dd64387a6, entries=1, sequenceid=5, filesize=4.7 K 2024-11-10T15:15:37,821 INFO [RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for da856137927a0d698f060b424af84e67 in 46ms, sequenceid=5, compaction requested=false 2024-11-10T15:15:37,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0-0 
{event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for da856137927a0d698f060b424af84e67: 2024-11-10T15:15:37,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731251736961.da856137927a0d698f060b424af84e67. 2024-11-10T15:15:37,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b1c88e26310d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-10T15:15:37,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-10T15:15:37,827 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-10T15:15:37,827 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 205 msec 2024-11-10T15:15:37,831 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 214 msec 2024-11-10T15:15:37,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-10T15:15:37,937 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-10T15:15:37,941 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-10T15:15:37,941 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
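The flush above (pids 7 and 8) turns a single 32-byte cell with key row/cf:cq into a ~4.7 K HFile under .tmp/cf and commits it into the store, with the block replicated to the three datanodes. A hedged sketch of the client calls that produce this write-then-flush pattern follows; the table, row, family and qualifier are taken from the log, the cell value is made up.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAndFlushSketch {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("TestHBaseWalOnEC");
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(tn);
             Admin admin = conn.getAdmin()) {
          // Key "row/cf:cq" as seen in the HFileWriterImpl line above; the value is illustrative.
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));

          // Triggers the FlushTableProcedure / FlushRegionProcedure pair logged above,
          // converting the 32 B memstore entry into a committed HFile.
          admin.flush(tn);
        }
      }
    }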
2024-11-10T15:15:37,941 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T15:15:37,941 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-11-10T15:15:37,942 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:15:37,942 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-10T15:15:37,942 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-10T15:15:37,942 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1596513133, stopped=false 2024-11-10T15:15:37,942 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=b1c88e26310d,33459,1731251735818 2024-11-10T15:15:37,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T15:15:37,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36993-0x1010272f7850003, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T15:15:37,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:37,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36773-0x1010272f7850002, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T15:15:37,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45831-0x1010272f7850001, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T15:15:37,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36993-0x1010272f7850003, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:37,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45831-0x1010272f7850001, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:37,945 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T15:15:37,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36773-0x1010272f7850002, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:37,945 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-10T15:15:37,946 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T15:15:37,946 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:15:37,946 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45831-0x1010272f7850001, quorum=127.0.0.1:58757, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:15:37,946 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36773-0x1010272f7850002, quorum=127.0.0.1:58757, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:15:37,946 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b1c88e26310d,45831,1731251735867' ***** 2024-11-10T15:15:37,946 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T15:15:37,946 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b1c88e26310d,36773,1731251735896' ***** 2024-11-10T15:15:37,946 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T15:15:37,946 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:15:37,946 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b1c88e26310d,36993,1731251735923' ***** 2024-11-10T15:15:37,946 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T15:15:37,946 INFO [RS:0;b1c88e26310d:45831 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T15:15:37,947 INFO [RS:0;b1c88e26310d:45831 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T15:15:37,947 INFO [RS:2;b1c88e26310d:36993 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T15:15:37,947 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36993-0x1010272f7850003, quorum=127.0.0.1:58757, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:15:37,947 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T15:15:37,947 INFO [RS:0;b1c88e26310d:45831 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-10T15:15:37,947 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T15:15:37,947 INFO [RS:0;b1c88e26310d:45831 {}] regionserver.HRegionServer(3091): Received CLOSE for da856137927a0d698f060b424af84e67 2024-11-10T15:15:37,947 INFO [RS:2;b1c88e26310d:36993 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T15:15:37,947 INFO [RS:2;b1c88e26310d:36993 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-10T15:15:37,947 INFO [RS:2;b1c88e26310d:36993 {}] regionserver.HRegionServer(959): stopping server b1c88e26310d,36993,1731251735923 2024-11-10T15:15:37,947 INFO [RS:1;b1c88e26310d:36773 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T15:15:37,947 INFO [RS:2;b1c88e26310d:36993 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T15:15:37,947 INFO [RS:0;b1c88e26310d:45831 {}] regionserver.HRegionServer(959): stopping server b1c88e26310d,45831,1731251735867 2024-11-10T15:15:37,947 INFO [RS:2;b1c88e26310d:36993 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;b1c88e26310d:36993. 2024-11-10T15:15:37,947 INFO [RS:1;b1c88e26310d:36773 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T15:15:37,947 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T15:15:37,947 INFO [RS:0;b1c88e26310d:45831 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T15:15:37,947 INFO [RS:1;b1c88e26310d:36773 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-10T15:15:37,947 DEBUG [RS:2;b1c88e26310d:36993 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T15:15:37,947 INFO [RS:1;b1c88e26310d:36773 {}] regionserver.HRegionServer(959): stopping server b1c88e26310d,36773,1731251735896 2024-11-10T15:15:37,947 INFO [RS:0;b1c88e26310d:45831 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;b1c88e26310d:45831. 
2024-11-10T15:15:37,948 DEBUG [RS:2;b1c88e26310d:36993 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:15:37,948 INFO [RS:1;b1c88e26310d:36773 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T15:15:37,948 DEBUG [RS:0;b1c88e26310d:45831 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T15:15:37,948 DEBUG [RS_CLOSE_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing da856137927a0d698f060b424af84e67, disabling compactions & flushes 2024-11-10T15:15:37,948 INFO [RS:1;b1c88e26310d:36773 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;b1c88e26310d:36773. 2024-11-10T15:15:37,948 INFO [RS:2;b1c88e26310d:36993 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T15:15:37,948 INFO [RS:2;b1c88e26310d:36993 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T15:15:37,948 DEBUG [RS:0;b1c88e26310d:45831 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:15:37,948 INFO [RS:2;b1c88e26310d:36993 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-10T15:15:37,948 INFO [RS_CLOSE_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731251736961.da856137927a0d698f060b424af84e67. 
2024-11-10T15:15:37,948 DEBUG [RS:1;b1c88e26310d:36773 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T15:15:37,948 DEBUG [RS_CLOSE_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731251736961.da856137927a0d698f060b424af84e67. 2024-11-10T15:15:37,948 INFO [RS:2;b1c88e26310d:36993 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-10T15:15:37,948 DEBUG [RS:1;b1c88e26310d:36773 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:15:37,948 INFO [RS:0;b1c88e26310d:45831 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-10T15:15:37,948 DEBUG [RS_CLOSE_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731251736961.da856137927a0d698f060b424af84e67. after waiting 0 ms 2024-11-10T15:15:37,948 DEBUG [RS:0;b1c88e26310d:45831 {}] regionserver.HRegionServer(1325): Online Regions={da856137927a0d698f060b424af84e67=TestHBaseWalOnEC,,1731251736961.da856137927a0d698f060b424af84e67.} 2024-11-10T15:15:37,948 INFO [RS:1;b1c88e26310d:36773 {}] regionserver.HRegionServer(976): stopping server b1c88e26310d,36773,1731251735896; all regions closed. 2024-11-10T15:15:37,948 DEBUG [RS_CLOSE_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731251736961.da856137927a0d698f060b424af84e67. 
2024-11-10T15:15:37,948 DEBUG [RS:0;b1c88e26310d:45831 {}] regionserver.HRegionServer(1351): Waiting on da856137927a0d698f060b424af84e67 2024-11-10T15:15:37,948 INFO [RS:2;b1c88e26310d:36993 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-10T15:15:37,948 DEBUG [RS:2;b1c88e26310d:36993 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-10T15:15:37,948 DEBUG [RS:2;b1c88e26310d:36993 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-10T15:15:37,948 DEBUG [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T15:15:37,948 INFO [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T15:15:37,948 DEBUG [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T15:15:37,949 DEBUG [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T15:15:37,949 DEBUG [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T15:15:37,949 INFO [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-10T15:15:37,950 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:15:37,951 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:15:37,951 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:15:37,952 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:15:37,952 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:15:37,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38487 is added to blk_1073741833_1009 (size=93) 2024-11-10T15:15:37,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741833_1009 (size=93) 2024-11-10T15:15:37,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37183 is added to blk_1073741833_1009 (size=93) 2024-11-10T15:15:37,962 DEBUG [RS:1;b1c88e26310d:36773 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/oldWALs 2024-11-10T15:15:37,962 INFO [RS:1;b1c88e26310d:36773 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog b1c88e26310d%2C36773%2C1731251735896:(num 1731251736343) 2024-11-10T15:15:37,962 DEBUG [RS:1;b1c88e26310d:36773 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:15:37,962 INFO [RS:1;b1c88e26310d:36773 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T15:15:37,962 DEBUG [RS_CLOSE_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/default/TestHBaseWalOnEC/da856137927a0d698f060b424af84e67/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-10T15:15:37,962 INFO [RS:1;b1c88e26310d:36773 {}] 
hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T15:15:37,962 INFO [RS:1;b1c88e26310d:36773 {}] hbase.ChoreService(370): Chore service for: regionserver/b1c88e26310d:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-10T15:15:37,962 INFO [RS:1;b1c88e26310d:36773 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T15:15:37,962 INFO [RS:1;b1c88e26310d:36773 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T15:15:37,962 INFO [regionserver/b1c88e26310d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T15:15:37,962 INFO [RS:1;b1c88e26310d:36773 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-10T15:15:37,962 INFO [RS:1;b1c88e26310d:36773 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T15:15:37,963 INFO [RS:1;b1c88e26310d:36773 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36773 2024-11-10T15:15:37,963 INFO [RS_CLOSE_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731251736961.da856137927a0d698f060b424af84e67. 2024-11-10T15:15:37,963 DEBUG [RS_CLOSE_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for da856137927a0d698f060b424af84e67: Waiting for close lock at 1731251737948Running coprocessor pre-close hooks at 1731251737948Disabling compacts and flushes for region at 1731251737948Disabling writes for close at 1731251737948Writing region close event to WAL at 1731251737952 (+4 ms)Running coprocessor post-close hooks at 1731251737962 (+10 ms)Closed at 1731251737963 (+1 ms) 2024-11-10T15:15:37,963 DEBUG [RS_CLOSE_REGION-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731251736961.da856137927a0d698f060b424af84e67. 
2024-11-10T15:15:37,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36773-0x1010272f7850002, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b1c88e26310d,36773,1731251735896 2024-11-10T15:15:37,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T15:15:37,964 INFO [RS:1;b1c88e26310d:36773 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T15:15:37,966 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b1c88e26310d,36773,1731251735896] 2024-11-10T15:15:37,967 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b1c88e26310d,36773,1731251735896 already deleted, retry=false 2024-11-10T15:15:37,967 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b1c88e26310d,36773,1731251735896 expired; onlineServers=2 2024-11-10T15:15:37,971 DEBUG [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/hbase/meta/1588230740/.tmp/info/7d073cfca5c04b22b5b5ba12255beaa1 is 153, key is TestHBaseWalOnEC,,1731251736961.da856137927a0d698f060b424af84e67./info:regioninfo/1731251737343/Put/seqid=0 2024-11-10T15:15:37,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37183 is added to blk_1073741840_1016 (size=6637) 2024-11-10T15:15:37,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741840_1016 (size=6637) 2024-11-10T15:15:37,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38487 is added to blk_1073741840_1016 (size=6637) 2024-11-10T15:15:37,979 INFO [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/hbase/meta/1588230740/.tmp/info/7d073cfca5c04b22b5b5ba12255beaa1 2024-11-10T15:15:37,997 INFO [regionserver/b1c88e26310d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T15:15:38,003 DEBUG [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/hbase/meta/1588230740/.tmp/ns/b8263587791b4e168119706810fc0894 is 43, key is default/ns:d/1731251736841/Put/seqid=0 2024-11-10T15:15:38,004 INFO [regionserver/b1c88e26310d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T15:15:38,005 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-10T15:15:38,006 INFO [regionserver/b1c88e26310d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T15:15:38,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38487 is added to 
blk_1073741841_1017 (size=5153) 2024-11-10T15:15:38,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741841_1017 (size=5153) 2024-11-10T15:15:38,012 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T15:15:38,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37183 is added to blk_1073741841_1017 (size=5153) 2024-11-10T15:15:38,013 INFO [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/hbase/meta/1588230740/.tmp/ns/b8263587791b4e168119706810fc0894 2024-11-10T15:15:38,043 DEBUG [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/hbase/meta/1588230740/.tmp/table/72218a66056645b49d3118123b6d04ad is 52, key is TestHBaseWalOnEC/table:state/1731251737357/Put/seqid=0 2024-11-10T15:15:38,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37183 is added to blk_1073741842_1018 (size=5249) 2024-11-10T15:15:38,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741842_1018 (size=5249) 2024-11-10T15:15:38,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38487 is added to blk_1073741842_1018 (size=5249) 2024-11-10T15:15:38,053 INFO [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/hbase/meta/1588230740/.tmp/table/72218a66056645b49d3118123b6d04ad 2024-11-10T15:15:38,061 DEBUG [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/hbase/meta/1588230740/.tmp/info/7d073cfca5c04b22b5b5ba12255beaa1 as hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/hbase/meta/1588230740/info/7d073cfca5c04b22b5b5ba12255beaa1 2024-11-10T15:15:38,066 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36773-0x1010272f7850002, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:15:38,066 INFO [RS:1;b1c88e26310d:36773 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T15:15:38,066 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36773-0x1010272f7850002, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:15:38,066 INFO [RS:1;b1c88e26310d:36773 {}] regionserver.HRegionServer(1031): Exiting; stopping=b1c88e26310d,36773,1731251735896; zookeeper connection closed. 
2024-11-10T15:15:38,067 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@13fdc478 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@13fdc478 2024-11-10T15:15:38,068 INFO [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/hbase/meta/1588230740/info/7d073cfca5c04b22b5b5ba12255beaa1, entries=10, sequenceid=11, filesize=6.5 K 2024-11-10T15:15:38,070 DEBUG [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/hbase/meta/1588230740/.tmp/ns/b8263587791b4e168119706810fc0894 as hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/hbase/meta/1588230740/ns/b8263587791b4e168119706810fc0894 2024-11-10T15:15:38,077 INFO [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/hbase/meta/1588230740/ns/b8263587791b4e168119706810fc0894, entries=2, sequenceid=11, filesize=5.0 K 2024-11-10T15:15:38,077 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T15:15:38,077 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T15:15:38,078 DEBUG [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/hbase/meta/1588230740/.tmp/table/72218a66056645b49d3118123b6d04ad as hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/hbase/meta/1588230740/table/72218a66056645b49d3118123b6d04ad 2024-11-10T15:15:38,085 INFO [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/hbase/meta/1588230740/table/72218a66056645b49d3118123b6d04ad, entries=2, sequenceid=11, filesize=5.1 K 2024-11-10T15:15:38,086 INFO [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 137ms, sequenceid=11, compaction requested=false 2024-11-10T15:15:38,092 DEBUG [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-10T15:15:38,092 DEBUG [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T15:15:38,092 INFO [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 
2024-11-10T15:15:38,093 DEBUG [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731251737948Running coprocessor pre-close hooks at 1731251737948Disabling compacts and flushes for region at 1731251737948Disabling writes for close at 1731251737949 (+1 ms)Obtaining lock to block concurrent updates at 1731251737949Preparing flush snapshotting stores in 1588230740 at 1731251737949Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731251737949Flushing stores of hbase:meta,,1.1588230740 at 1731251737951 (+2 ms)Flushing 1588230740/info: creating writer at 1731251737951Flushing 1588230740/info: appending metadata at 1731251737971 (+20 ms)Flushing 1588230740/info: closing flushed file at 1731251737971Flushing 1588230740/ns: creating writer at 1731251737986 (+15 ms)Flushing 1588230740/ns: appending metadata at 1731251738002 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731251738002Flushing 1588230740/table: creating writer at 1731251738021 (+19 ms)Flushing 1588230740/table: appending metadata at 1731251738043 (+22 ms)Flushing 1588230740/table: closing flushed file at 1731251738043Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@19af7c95: reopening flushed file at 1731251738060 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@434913ea: reopening flushed file at 1731251738069 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5db8ba99: reopening flushed file at 1731251738077 (+8 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 137ms, sequenceid=11, compaction requested=false at 1731251738086 (+9 ms)Writing region close event to WAL at 1731251738088 (+2 ms)Running coprocessor post-close hooks at 1731251738092 (+4 ms)Closed at 1731251738092 2024-11-10T15:15:38,093 DEBUG [RS_CLOSE_META-regionserver/b1c88e26310d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-10T15:15:38,148 INFO [RS:0;b1c88e26310d:45831 {}] regionserver.HRegionServer(976): stopping server b1c88e26310d,45831,1731251735867; all regions closed. 2024-11-10T15:15:38,148 INFO [RS:2;b1c88e26310d:36993 {}] regionserver.HRegionServer(976): stopping server b1c88e26310d,36993,1731251735923; all regions closed. 
2024-11-10T15:15:38,149 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:15:38,149 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:15:38,149 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:15:38,149 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:15:38,149 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:15:38,149 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:15:38,149 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:15:38,149 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:15:38,150 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:15:38,150 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:15:38,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741836_1012 (size=2751) 2024-11-10T15:15:38,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38487 is added to blk_1073741834_1010 (size=1298) 2024-11-10T15:15:38,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741834_1010 (size=1298) 2024-11-10T15:15:38,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37183 is added to blk_1073741834_1010 (size=1298) 2024-11-10T15:15:38,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38487 is added to blk_1073741836_1012 (size=2751) 2024-11-10T15:15:38,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37183 is added to blk_1073741836_1012 (size=2751) 2024-11-10T15:15:38,157 DEBUG [RS:0;b1c88e26310d:45831 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/oldWALs 2024-11-10T15:15:38,157 INFO [RS:0;b1c88e26310d:45831 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog b1c88e26310d%2C45831%2C1731251735867:(num 1731251736344) 2024-11-10T15:15:38,157 DEBUG [RS:0;b1c88e26310d:45831 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:15:38,157 INFO [RS:0;b1c88e26310d:45831 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T15:15:38,157 INFO [RS:0;b1c88e26310d:45831 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T15:15:38,157 DEBUG [RS:2;b1c88e26310d:36993 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/oldWALs 2024-11-10T15:15:38,157 INFO [RS:2;b1c88e26310d:36993 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog b1c88e26310d%2C36993%2C1731251735923.meta:.meta(num 1731251736789) 2024-11-10T15:15:38,157 INFO [RS:0;b1c88e26310d:45831 {}] hbase.ChoreService(370): Chore service for: regionserver/b1c88e26310d:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-10T15:15:38,157 INFO [RS:0;b1c88e26310d:45831 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T15:15:38,157 INFO [RS:0;b1c88e26310d:45831 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-11-10T15:15:38,158 INFO [RS:0;b1c88e26310d:45831 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-10T15:15:38,158 INFO [RS:0;b1c88e26310d:45831 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T15:15:38,158 INFO [RS:0;b1c88e26310d:45831 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45831 2024-11-10T15:15:38,158 INFO [regionserver/b1c88e26310d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T15:15:38,159 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:15:38,159 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:15:38,160 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:15:38,160 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:15:38,160 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:15:38,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45831-0x1010272f7850001, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b1c88e26310d,45831,1731251735867 2024-11-10T15:15:38,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T15:15:38,161 INFO [RS:0;b1c88e26310d:45831 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T15:15:38,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38487 is added to blk_1073741835_1011 (size=93) 2024-11-10T15:15:38,164 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b1c88e26310d,45831,1731251735867] 2024-11-10T15:15:38,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37183 is added to blk_1073741835_1011 (size=93) 2024-11-10T15:15:38,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741835_1011 (size=93) 2024-11-10T15:15:38,165 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b1c88e26310d,45831,1731251735867 already deleted, retry=false 2024-11-10T15:15:38,166 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b1c88e26310d,45831,1731251735867 expired; onlineServers=1 2024-11-10T15:15:38,167 DEBUG [RS:2;b1c88e26310d:36993 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/oldWALs 2024-11-10T15:15:38,167 INFO [RS:2;b1c88e26310d:36993 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog b1c88e26310d%2C36993%2C1731251735923:(num 1731251736350) 2024-11-10T15:15:38,167 DEBUG [RS:2;b1c88e26310d:36993 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:15:38,167 INFO [RS:2;b1c88e26310d:36993 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T15:15:38,167 INFO [RS:2;b1c88e26310d:36993 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T15:15:38,168 INFO [RS:2;b1c88e26310d:36993 {}] hbase.ChoreService(370): Chore service for: regionserver/b1c88e26310d:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, 
ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-10T15:15:38,168 INFO [RS:2;b1c88e26310d:36993 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T15:15:38,168 INFO [regionserver/b1c88e26310d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T15:15:38,168 INFO [RS:2;b1c88e26310d:36993 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36993 2024-11-10T15:15:38,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36993-0x1010272f7850003, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b1c88e26310d,36993,1731251735923 2024-11-10T15:15:38,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T15:15:38,170 INFO [RS:2;b1c88e26310d:36993 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T15:15:38,172 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b1c88e26310d,36993,1731251735923] 2024-11-10T15:15:38,174 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b1c88e26310d,36993,1731251735923 already deleted, retry=false 2024-11-10T15:15:38,174 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b1c88e26310d,36993,1731251735923 expired; onlineServers=0 2024-11-10T15:15:38,174 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'b1c88e26310d,33459,1731251735818' ***** 2024-11-10T15:15:38,174 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-10T15:15:38,174 INFO [M:0;b1c88e26310d:33459 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T15:15:38,174 INFO [M:0;b1c88e26310d:33459 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T15:15:38,174 DEBUG [M:0;b1c88e26310d:33459 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-10T15:15:38,174 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
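
[editorial sketch] The ZooKeeper events in the records above (NodeDeleted under /hbase/rs followed by RegionServerTracker "ephemeral node deleted, processing expiration") reflect the usual ephemeral-znode liveness pattern: each region server registers an ephemeral node under /hbase/rs, the master watches that subtree, and deletion of a node is treated as that server expiring. The sketch below illustrates the generic pattern with the plain ZooKeeper client API; it is not HBase's RegionServerTracker implementation. The /hbase/rs path and the 127.0.0.1:58757 quorum string come from the log; the znode name, the session timeout and the standalone class are assumptions, and the parent znodes are assumed to already exist.

    import java.util.List;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralLivenessSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:58757", 30_000, event -> { });

        // "Region server" side: an ephemeral node that disappears when this session closes.
        zk.create("/hbase/rs/example-server", new byte[0],
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

        // "Master" side: watch the children of /hbase/rs; once a server's ephemeral node
        // is deleted, a NodeChildrenChanged event fires and expiration handling can run.
        Watcher watcher = (WatchedEvent event) -> {
          if (event.getType() == Watcher.Event.EventType.NodeChildrenChanged) {
            System.out.println("children of /hbase/rs changed: " + event.getPath());
          }
        };
        List<String> servers = zk.getChildren("/hbase/rs", watcher);
        System.out.println("live servers: " + servers);
        zk.close();
      }
    }
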
2024-11-10T15:15:38,174 DEBUG [M:0;b1c88e26310d:33459 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-10T15:15:38,174 DEBUG [master/b1c88e26310d:0:becomeActiveMaster-HFileCleaner.large.0-1731251736139 {}] cleaner.HFileCleaner(306): Exit Thread[master/b1c88e26310d:0:becomeActiveMaster-HFileCleaner.large.0-1731251736139,5,FailOnTimeoutGroup] 2024-11-10T15:15:38,174 DEBUG [master/b1c88e26310d:0:becomeActiveMaster-HFileCleaner.small.0-1731251736139 {}] cleaner.HFileCleaner(306): Exit Thread[master/b1c88e26310d:0:becomeActiveMaster-HFileCleaner.small.0-1731251736139,5,FailOnTimeoutGroup] 2024-11-10T15:15:38,174 INFO [M:0;b1c88e26310d:33459 {}] hbase.ChoreService(370): Chore service for: master/b1c88e26310d:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-10T15:15:38,174 INFO [M:0;b1c88e26310d:33459 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T15:15:38,175 DEBUG [M:0;b1c88e26310d:33459 {}] master.HMaster(1795): Stopping service threads 2024-11-10T15:15:38,175 INFO [M:0;b1c88e26310d:33459 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-10T15:15:38,175 INFO [M:0;b1c88e26310d:33459 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T15:15:38,175 INFO [M:0;b1c88e26310d:33459 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-10T15:15:38,175 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-10T15:15:38,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-10T15:15:38,177 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:15:38,177 DEBUG [M:0;b1c88e26310d:33459 {}] zookeeper.ZKUtil(347): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-10T15:15:38,177 WARN [M:0;b1c88e26310d:33459 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-10T15:15:38,177 INFO [M:0;b1c88e26310d:33459 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/.lastflushedseqids 2024-11-10T15:15:38,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37183 is added to blk_1073741843_1019 (size=127) 2024-11-10T15:15:38,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741843_1019 (size=127) 2024-11-10T15:15:38,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38487 is added to blk_1073741843_1019 (size=127) 2024-11-10T15:15:38,186 INFO [M:0;b1c88e26310d:33459 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-10T15:15:38,186 INFO [M:0;b1c88e26310d:33459 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-10T15:15:38,187 DEBUG [M:0;b1c88e26310d:33459 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T15:15:38,187 INFO [M:0;b1c88e26310d:33459 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T15:15:38,187 DEBUG [M:0;b1c88e26310d:33459 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T15:15:38,187 DEBUG [M:0;b1c88e26310d:33459 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T15:15:38,187 DEBUG [M:0;b1c88e26310d:33459 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T15:15:38,187 INFO [M:0;b1c88e26310d:33459 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.85 KB heapSize=34.13 KB 2024-11-10T15:15:38,206 DEBUG [M:0;b1c88e26310d:33459 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/696b6439646b4cc2bb26f3a82445b93c is 82, key is hbase:meta,,1/info:regioninfo/1731251736823/Put/seqid=0 2024-11-10T15:15:38,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38487 is added to blk_1073741844_1020 (size=5672) 2024-11-10T15:15:38,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37183 is added to blk_1073741844_1020 (size=5672) 2024-11-10T15:15:38,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741844_1020 (size=5672) 2024-11-10T15:15:38,215 INFO [M:0;b1c88e26310d:33459 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/696b6439646b4cc2bb26f3a82445b93c 2024-11-10T15:15:38,239 DEBUG [M:0;b1c88e26310d:33459 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3a4046e02201499685ab6f090d80da86 is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731251737364/Put/seqid=0 2024-11-10T15:15:38,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741845_1021 (size=6441) 2024-11-10T15:15:38,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38487 is added to blk_1073741845_1021 (size=6441) 2024-11-10T15:15:38,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37183 is added to blk_1073741845_1021 (size=6441) 2024-11-10T15:15:38,247 INFO [M:0;b1c88e26310d:33459 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.17 KB at sequenceid=72 (bloomFilter=true), 
to=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3a4046e02201499685ab6f090d80da86 2024-11-10T15:15:38,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45831-0x1010272f7850001, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:15:38,264 INFO [RS:0;b1c88e26310d:45831 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T15:15:38,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45831-0x1010272f7850001, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:15:38,264 INFO [RS:0;b1c88e26310d:45831 {}] regionserver.HRegionServer(1031): Exiting; stopping=b1c88e26310d,45831,1731251735867; zookeeper connection closed. 2024-11-10T15:15:38,264 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6b511a08 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6b511a08 2024-11-10T15:15:38,271 DEBUG [M:0;b1c88e26310d:33459 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/59e1a087d3454371bd76d7b3021075a2 is 69, key is b1c88e26310d,36773,1731251735896/rs:state/1731251736174/Put/seqid=0 2024-11-10T15:15:38,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36993-0x1010272f7850003, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:15:38,272 INFO [RS:2;b1c88e26310d:36993 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T15:15:38,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36993-0x1010272f7850003, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:15:38,273 INFO [RS:2;b1c88e26310d:36993 {}] regionserver.HRegionServer(1031): Exiting; stopping=b1c88e26310d,36993,1731251735923; zookeeper connection closed. 
2024-11-10T15:15:38,273 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@27b25242 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@27b25242 2024-11-10T15:15:38,273 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-10T15:15:38,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741846_1022 (size=5294) 2024-11-10T15:15:38,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38487 is added to blk_1073741846_1022 (size=5294) 2024-11-10T15:15:38,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37183 is added to blk_1073741846_1022 (size=5294) 2024-11-10T15:15:38,281 INFO [M:0;b1c88e26310d:33459 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/59e1a087d3454371bd76d7b3021075a2 2024-11-10T15:15:38,289 DEBUG [M:0;b1c88e26310d:33459 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/696b6439646b4cc2bb26f3a82445b93c as hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/696b6439646b4cc2bb26f3a82445b93c 2024-11-10T15:15:38,296 INFO [M:0;b1c88e26310d:33459 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/696b6439646b4cc2bb26f3a82445b93c, entries=8, sequenceid=72, filesize=5.5 K 2024-11-10T15:15:38,297 DEBUG [M:0;b1c88e26310d:33459 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3a4046e02201499685ab6f090d80da86 as hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3a4046e02201499685ab6f090d80da86 2024-11-10T15:15:38,304 INFO [M:0;b1c88e26310d:33459 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3a4046e02201499685ab6f090d80da86, entries=8, sequenceid=72, filesize=6.3 K 2024-11-10T15:15:38,305 DEBUG [M:0;b1c88e26310d:33459 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/59e1a087d3454371bd76d7b3021075a2 as hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/59e1a087d3454371bd76d7b3021075a2 2024-11-10T15:15:38,313 INFO [M:0;b1c88e26310d:33459 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42151/user/jenkins/test-data/4da928b0-6bc3-c838-fd76-4baa90a91ef8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/59e1a087d3454371bd76d7b3021075a2, entries=3, sequenceid=72, filesize=5.2 K 2024-11-10T15:15:38,315 INFO [M:0;b1c88e26310d:33459 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.85 KB/27492, heapSize ~33.84 KB/34648, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=72, compaction requested=false 2024-11-10T15:15:38,316 INFO [M:0;b1c88e26310d:33459 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T15:15:38,316 DEBUG [M:0;b1c88e26310d:33459 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731251738187Disabling compacts and flushes for region at 1731251738187Disabling writes for close at 1731251738187Obtaining lock to block concurrent updates at 1731251738187Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731251738187Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27492, getHeapSize=34888, getOffHeapSize=0, getCellsCount=85 at 1731251738188 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731251738189 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731251738189Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731251738205 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731251738205Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731251738222 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731251738238 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731251738238Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731251738254 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731251738271 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731251738271Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7706e069: reopening flushed file at 1731251738288 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d3bb2d5: reopening flushed file at 1731251738296 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2fcb8e93: reopening flushed file at 1731251738304 (+8 ms)Finished flush of dataSize ~26.85 KB/27492, heapSize ~33.84 KB/34648, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=72, compaction requested=false at 1731251738315 (+11 ms)Writing region close event to WAL at 1731251738316 (+1 ms)Closed at 1731251738316 2024-11-10T15:15:38,317 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:15:38,317 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:15:38,317 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:15:38,317 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:15:38,317 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:15:38,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741830_1006 (size=32695) 2024-11-10T15:15:38,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37183 is added to 
blk_1073741830_1006 (size=32695) 2024-11-10T15:15:38,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38487 is added to blk_1073741830_1006 (size=32695) 2024-11-10T15:15:38,322 INFO [M:0;b1c88e26310d:33459 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-10T15:15:38,322 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T15:15:38,323 INFO [M:0;b1c88e26310d:33459 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33459 2024-11-10T15:15:38,323 INFO [M:0;b1c88e26310d:33459 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T15:15:38,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:15:38,425 INFO [M:0;b1c88e26310d:33459 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T15:15:38,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33459-0x1010272f7850000, quorum=127.0.0.1:58757, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:15:38,428 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@523e382d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T15:15:38,428 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5cdad191{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T15:15:38,428 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T15:15:38,428 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3beb2b8e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T15:15:38,428 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@381275e5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/hadoop.log.dir/,STOPPED} 2024-11-10T15:15:38,430 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T15:15:38,430 WARN [BP-616955154-172.17.0.2-1731251734838 heartbeating to localhost/127.0.0.1:42151 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T15:15:38,430 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T15:15:38,430 WARN [BP-616955154-172.17.0.2-1731251734838 heartbeating to localhost/127.0.0.1:42151 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-616955154-172.17.0.2-1731251734838 (Datanode Uuid 79f01456-2dab-42b3-8901-16231fc8a479) service to localhost/127.0.0.1:42151 2024-11-10T15:15:38,431 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/cluster_442b5a45-9d9f-de6e-9655-8cc3fcc1edb7/data/data5/current/BP-616955154-172.17.0.2-1731251734838 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T15:15:38,431 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/cluster_442b5a45-9d9f-de6e-9655-8cc3fcc1edb7/data/data6/current/BP-616955154-172.17.0.2-1731251734838 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T15:15:38,431 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T15:15:38,433 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@365855b2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T15:15:38,434 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7b9c4315{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T15:15:38,434 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T15:15:38,434 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a71642{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T15:15:38,435 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ed2e3e6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/hadoop.log.dir/,STOPPED} 2024-11-10T15:15:38,436 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T15:15:38,436 WARN [BP-616955154-172.17.0.2-1731251734838 heartbeating to localhost/127.0.0.1:42151 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-10T15:15:38,436 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-10T15:15:38,436 WARN [BP-616955154-172.17.0.2-1731251734838 heartbeating to localhost/127.0.0.1:42151 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-616955154-172.17.0.2-1731251734838 (Datanode Uuid 36c55064-dfbb-4619-85fb-6331386546c4) service to localhost/127.0.0.1:42151
2024-11-10T15:15:38,437 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/cluster_442b5a45-9d9f-de6e-9655-8cc3fcc1edb7/data/data3/current/BP-616955154-172.17.0.2-1731251734838 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-10T15:15:38,437 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/cluster_442b5a45-9d9f-de6e-9655-8cc3fcc1edb7/data/data4/current/BP-616955154-172.17.0.2-1731251734838 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-10T15:15:38,437 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-10T15:15:38,439 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@17794d45{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-10T15:15:38,440 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@36d1058{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-10T15:15:38,440 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-10T15:15:38,440 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c867c3b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-10T15:15:38,440 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6980ca59{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/hadoop.log.dir/,STOPPED}
2024-11-10T15:15:38,441 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-10T15:15:38,441 WARN [BP-616955154-172.17.0.2-1731251734838 heartbeating to localhost/127.0.0.1:42151 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-10T15:15:38,441 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-10T15:15:38,441 WARN [BP-616955154-172.17.0.2-1731251734838 heartbeating to localhost/127.0.0.1:42151 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-616955154-172.17.0.2-1731251734838 (Datanode Uuid f9220acd-c87e-44b6-bebb-62f5e956296e) service to localhost/127.0.0.1:42151
2024-11-10T15:15:38,442 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/cluster_442b5a45-9d9f-de6e-9655-8cc3fcc1edb7/data/data1/current/BP-616955154-172.17.0.2-1731251734838 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-10T15:15:38,442 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/cluster_442b5a45-9d9f-de6e-9655-8cc3fcc1edb7/data/data2/current/BP-616955154-172.17.0.2-1731251734838 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-10T15:15:38,442 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-10T15:15:38,448 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7d30adc2{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-10T15:15:38,449 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3df483b8{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-10T15:15:38,449 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-10T15:15:38,449 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1921d73d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-10T15:15:38,449 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@130ce80d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5707b0ad-15f6-69cb-9954-9b1b0a0be09d/hadoop.log.dir/,STOPPED}
2024-11-10T15:15:38,457 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-10T15:15:38,481 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-10T15:15:38,489 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=146 (was 86) - Thread LEAK? -, OpenFileDescriptor=521 (was 439) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=64 (was 61) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=9068 (was 9238)
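
Note on the final ResourceChecker line above: once HBaseTestingUtil reports the minicluster as down, it prints thread and open-file-descriptor counts taken before and after regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] and flags the differences as possible leaks. The sketch below is not the HBase ResourceChecker implementation seen in the log; it is a minimal plain-JDK approximation of capturing such before/after counters and printing them in a similar format. The class name ResourceSnapshotSketch and the dummy workload are invented for illustration only.

import java.lang.management.ManagementFactory;
import java.lang.management.OperatingSystemMXBean;
import java.lang.management.ThreadMXBean;
import java.nio.file.Files;
import java.nio.file.Path;

import com.sun.management.UnixOperatingSystemMXBean;

// Illustrative sketch only: approximates the before/after counters the log's
// ResourceChecker summary prints; it is NOT the HBase ResourceChecker class.
public class ResourceSnapshotSketch {

    // Holder for one snapshot of live threads and open file descriptors.
    static final class Snapshot {
        final int threads;
        final long openFds; // -1 when the platform bean does not expose FD counts

        Snapshot(int threads, long openFds) {
            this.threads = threads;
            this.openFds = openFds;
        }
    }

    static Snapshot snapshot() {
        ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
        OperatingSystemMXBean osBean = ManagementFactory.getOperatingSystemMXBean();
        long fds = -1L;
        if (osBean instanceof UnixOperatingSystemMXBean) {
            // Only available on Unix-like JVMs, which is where this test run executed.
            fds = ((UnixOperatingSystemMXBean) osBean).getOpenFileDescriptorCount();
        }
        return new Snapshot(threadBean.getThreadCount(), fds);
    }

    public static void main(String[] args) throws Exception {
        Snapshot before = snapshot();

        // Stand-in for the test body: the real test starts and stops a minicluster,
        // which is what moves these counters in the log above.
        Thread worker = new Thread(() -> { });
        worker.start();
        worker.join();
        Path tmp = Files.createTempFile("resource-check", ".tmp");
        Files.delete(tmp);

        Snapshot after = snapshot();
        System.out.printf(
            "after: Thread=%d (was %d)%s, OpenFileDescriptor=%d (was %d)%s%n",
            after.threads, before.threads,
            after.threads > before.threads ? " - Thread LEAK? -" : "",
            after.openFds, before.openFds,
            after.openFds > before.openFds ? " - OpenFileDescriptor LEAK? -" : "");
    }
}

In the log, the thread count rising from 86 to 146 and the descriptor count from 439 to 521 is what triggers the "LEAK?" annotations; the checker only reports the deltas, it does not fail the test by itself.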