2024-11-12 09:32:08,272 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-11-12 09:32:08,283 main DEBUG Took 0.009293 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-12 09:32:08,284 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-12 09:32:08,284 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-12 09:32:08,285 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-12 09:32:08,286 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 09:32:08,302 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-12 09:32:08,313 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 09:32:08,315 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 09:32:08,315 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 09:32:08,316 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 09:32:08,316 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 09:32:08,316 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 09:32:08,317 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 09:32:08,317 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 09:32:08,318 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 09:32:08,318 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 09:32:08,319 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 09:32:08,319 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 09:32:08,320 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 09:32:08,320 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-12 09:32:08,321 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 09:32:08,321 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 09:32:08,322 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 09:32:08,322 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 09:32:08,322 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 09:32:08,323 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 09:32:08,323 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 09:32:08,323 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 09:32:08,324 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 09:32:08,324 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 09:32:08,325 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 09:32:08,325 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-12 09:32:08,326 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 09:32:08,328 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-12 09:32:08,329 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-12 09:32:08,330 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-12 09:32:08,331 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-12 09:32:08,331 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-12 09:32:08,339 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-12 09:32:08,342 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-12 09:32:08,343 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-12 09:32:08,344 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-12 09:32:08,344 main DEBUG createAppenders(={Console}) 2024-11-12 09:32:08,345 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-11-12 09:32:08,345 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-11-12 09:32:08,346 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-11-12 09:32:08,346 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-12 09:32:08,346 main DEBUG OutputStream closed 2024-11-12 09:32:08,346 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-12 09:32:08,347 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-12 09:32:08,347 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-11-12 09:32:08,413 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-12 09:32:08,415 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-12 09:32:08,416 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-12 09:32:08,417 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-12 09:32:08,418 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-12 09:32:08,418 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-12 09:32:08,419 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-12 09:32:08,419 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-12 09:32:08,419 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-12 09:32:08,420 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-12 09:32:08,420 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-12 09:32:08,421 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-12 09:32:08,421 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-12 09:32:08,421 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-12 09:32:08,421 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-12 09:32:08,422 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-12 09:32:08,422 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-12 09:32:08,423 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-12 09:32:08,425 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-12 09:32:08,426 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-11-12 09:32:08,426 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-12 09:32:08,427 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-11-12T09:32:08,441 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-11-12 09:32:08,444 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-12 09:32:08,445 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-12T09:32:08,670 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf 2024-11-12T09:32:08,697 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/cluster_47d7e28a-ae76-71a6-6eff-9980c6c1d183, deleteOnExit=true 2024-11-12T09:32:08,698 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/test.cache.data in system properties and HBase conf 2024-11-12T09:32:08,698 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/hadoop.tmp.dir in system properties and HBase conf 2024-11-12T09:32:08,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/hadoop.log.dir in system properties and HBase conf 2024-11-12T09:32:08,700 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-12T09:32:08,700 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-12T09:32:08,700 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-12T09:32:08,781 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-12T09:32:08,864 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-12T09:32:08,868 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-12T09:32:08,869 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-12T09:32:08,869 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-12T09:32:08,870 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T09:32:08,870 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-12T09:32:08,871 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-12T09:32:08,871 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T09:32:08,872 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T09:32:08,872 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-12T09:32:08,873 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/nfs.dump.dir in system properties and HBase conf 2024-11-12T09:32:08,873 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/java.io.tmpdir in system properties and HBase conf 2024-11-12T09:32:08,873 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T09:32:08,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-12T09:32:08,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-12T09:32:09,996 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-12T09:32:10,078 INFO [Time-limited test {}] log.Log(170): Logging initialized @2450ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-12T09:32:10,154 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T09:32:10,222 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T09:32:10,247 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T09:32:10,247 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T09:32:10,249 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-12T09:32:10,262 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T09:32:10,266 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/hadoop.log.dir/,AVAILABLE} 2024-11-12T09:32:10,267 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T09:32:10,439 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e4c45c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/java.io.tmpdir/jetty-localhost-45747-hadoop-hdfs-3_4_1-tests_jar-_-any-3904794766676642061/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-12T09:32:10,450 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:45747} 2024-11-12T09:32:10,451 INFO [Time-limited test {}] server.Server(415): Started @2824ms 2024-11-12T09:32:11,090 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T09:32:11,097 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T09:32:11,098 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T09:32:11,099 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T09:32:11,099 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T09:32:11,099 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/hadoop.log.dir/,AVAILABLE} 2024-11-12T09:32:11,100 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T09:32:11,197 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4839957b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/java.io.tmpdir/jetty-localhost-46695-hadoop-hdfs-3_4_1-tests_jar-_-any-15490895848341249476/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T09:32:11,198 
INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:46695} 2024-11-12T09:32:11,198 INFO [Time-limited test {}] server.Server(415): Started @3571ms 2024-11-12T09:32:11,245 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T09:32:11,346 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T09:32:11,354 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T09:32:11,355 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T09:32:11,355 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T09:32:11,355 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T09:32:11,356 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/hadoop.log.dir/,AVAILABLE} 2024-11-12T09:32:11,357 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T09:32:11,459 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c6b8f01{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/java.io.tmpdir/jetty-localhost-44557-hadoop-hdfs-3_4_1-tests_jar-_-any-7255627851252014245/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T09:32:11,460 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:44557} 2024-11-12T09:32:11,460 INFO [Time-limited test {}] server.Server(415): Started @3833ms 2024-11-12T09:32:11,462 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T09:32:11,495 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T09:32:11,501 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T09:32:11,502 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T09:32:11,503 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T09:32:11,503 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T09:32:11,503 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/hadoop.log.dir/,AVAILABLE} 2024-11-12T09:32:11,504 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T09:32:11,612 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e59159d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/java.io.tmpdir/jetty-localhost-36587-hadoop-hdfs-3_4_1-tests_jar-_-any-13544986373900286958/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T09:32:11,613 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:36587} 2024-11-12T09:32:11,613 INFO [Time-limited test {}] server.Server(415): Started @3987ms 2024-11-12T09:32:11,615 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T09:32:13,067 WARN [Thread-121 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/cluster_47d7e28a-ae76-71a6-6eff-9980c6c1d183/data/data4/current/BP-820054393-172.17.0.2-1731403929408/current, will proceed with Du for space computation calculation, 2024-11-12T09:32:13,067 WARN [Thread-120 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/cluster_47d7e28a-ae76-71a6-6eff-9980c6c1d183/data/data3/current/BP-820054393-172.17.0.2-1731403929408/current, will proceed with Du for space computation calculation, 2024-11-12T09:32:13,093 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-12T09:32:13,106 WARN [Thread-131 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/cluster_47d7e28a-ae76-71a6-6eff-9980c6c1d183/data/data1/current/BP-820054393-172.17.0.2-1731403929408/current, will proceed with Du for space computation calculation, 2024-11-12T09:32:13,106 WARN [Thread-132 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/cluster_47d7e28a-ae76-71a6-6eff-9980c6c1d183/data/data2/current/BP-820054393-172.17.0.2-1731403929408/current, will proceed with Du for space computation calculation, 2024-11-12T09:32:13,123 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-12T09:32:13,141 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa876ab63ea1cc422 with lease ID 0x611b067a438717a6: Processing first storage report for DS-9fae446a-ddae-4ad0-8361-fc3ea4039c7c from datanode DatanodeRegistration(127.0.0.1:35337, datanodeUuid=629a177e-a519-4a1a-8b7d-9ca92883d176, infoPort=34915, infoSecurePort=0, ipcPort=42553, storageInfo=lv=-57;cid=testClusterID;nsid=886755429;c=1731403929408) 2024-11-12T09:32:13,143 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa876ab63ea1cc422 with lease ID 0x611b067a438717a6: from storage DS-9fae446a-ddae-4ad0-8361-fc3ea4039c7c node DatanodeRegistration(127.0.0.1:35337, datanodeUuid=629a177e-a519-4a1a-8b7d-9ca92883d176, infoPort=34915, infoSecurePort=0, ipcPort=42553, storageInfo=lv=-57;cid=testClusterID;nsid=886755429;c=1731403929408), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-12T09:32:13,143 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x757657b331efa45c with lease ID 0x611b067a438717a7: Processing first storage report for DS-46f738d4-202a-446b-839f-eccbf45ec130 from datanode DatanodeRegistration(127.0.0.1:36449, datanodeUuid=abc8d647-d43c-40e0-9b32-952da22bd94b, infoPort=37487, infoSecurePort=0, ipcPort=45467, storageInfo=lv=-57;cid=testClusterID;nsid=886755429;c=1731403929408) 2024-11-12T09:32:13,144 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x757657b331efa45c with lease ID 0x611b067a438717a7: from storage DS-46f738d4-202a-446b-839f-eccbf45ec130 node DatanodeRegistration(127.0.0.1:36449, datanodeUuid=abc8d647-d43c-40e0-9b32-952da22bd94b, infoPort=37487, infoSecurePort=0, ipcPort=45467, storageInfo=lv=-57;cid=testClusterID;nsid=886755429;c=1731403929408), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T09:32:13,144 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa876ab63ea1cc422 with lease ID 0x611b067a438717a6: Processing first storage report for DS-ca4d6f92-0e9b-4a73-91e7-e14a5368fe3b from datanode DatanodeRegistration(127.0.0.1:35337, datanodeUuid=629a177e-a519-4a1a-8b7d-9ca92883d176, infoPort=34915, infoSecurePort=0, ipcPort=42553, storageInfo=lv=-57;cid=testClusterID;nsid=886755429;c=1731403929408) 2024-11-12T09:32:13,144 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xa876ab63ea1cc422 with lease ID 0x611b067a438717a6: from storage DS-ca4d6f92-0e9b-4a73-91e7-e14a5368fe3b node DatanodeRegistration(127.0.0.1:35337, datanodeUuid=629a177e-a519-4a1a-8b7d-9ca92883d176, infoPort=34915, infoSecurePort=0, ipcPort=42553, storageInfo=lv=-57;cid=testClusterID;nsid=886755429;c=1731403929408), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-12T09:32:13,144 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x757657b331efa45c with lease ID 0x611b067a438717a7: Processing first storage report for DS-07c95ab6-e92c-40e1-964a-ba276e10f3c9 from datanode DatanodeRegistration(127.0.0.1:36449, datanodeUuid=abc8d647-d43c-40e0-9b32-952da22bd94b, infoPort=37487, infoSecurePort=0, ipcPort=45467, storageInfo=lv=-57;cid=testClusterID;nsid=886755429;c=1731403929408) 2024-11-12T09:32:13,145 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x757657b331efa45c with lease ID 0x611b067a438717a7: from storage DS-07c95ab6-e92c-40e1-964a-ba276e10f3c9 node DatanodeRegistration(127.0.0.1:36449, datanodeUuid=abc8d647-d43c-40e0-9b32-952da22bd94b, infoPort=37487, infoSecurePort=0, ipcPort=45467, storageInfo=lv=-57;cid=testClusterID;nsid=886755429;c=1731403929408), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T09:32:13,225 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/cluster_47d7e28a-ae76-71a6-6eff-9980c6c1d183/data/data6/current/BP-820054393-172.17.0.2-1731403929408/current, will proceed with Du for space computation calculation, 2024-11-12T09:32:13,225 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/cluster_47d7e28a-ae76-71a6-6eff-9980c6c1d183/data/data5/current/BP-820054393-172.17.0.2-1731403929408/current, will proceed with Du for space computation calculation, 2024-11-12T09:32:13,243 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-12T09:32:13,247 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaa850fade1b2ba6e with lease ID 0x611b067a438717a8: Processing first storage report for DS-d7c065bf-0747-4e22-aa9e-afd0d437e11f from datanode DatanodeRegistration(127.0.0.1:37913, datanodeUuid=87986298-ac6a-4a43-8071-71035f579b6e, infoPort=41707, infoSecurePort=0, ipcPort=38679, storageInfo=lv=-57;cid=testClusterID;nsid=886755429;c=1731403929408) 2024-11-12T09:32:13,248 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaa850fade1b2ba6e with lease ID 0x611b067a438717a8: from storage DS-d7c065bf-0747-4e22-aa9e-afd0d437e11f node DatanodeRegistration(127.0.0.1:37913, datanodeUuid=87986298-ac6a-4a43-8071-71035f579b6e, infoPort=41707, infoSecurePort=0, ipcPort=38679, storageInfo=lv=-57;cid=testClusterID;nsid=886755429;c=1731403929408), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T09:32:13,248 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaa850fade1b2ba6e with lease ID 0x611b067a438717a8: Processing first storage report for DS-884c18e0-9f52-4f91-af71-48ac7ed69385 from datanode DatanodeRegistration(127.0.0.1:37913, datanodeUuid=87986298-ac6a-4a43-8071-71035f579b6e, infoPort=41707, infoSecurePort=0, ipcPort=38679, storageInfo=lv=-57;cid=testClusterID;nsid=886755429;c=1731403929408) 2024-11-12T09:32:13,248 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaa850fade1b2ba6e with lease ID 0x611b067a438717a8: from storage DS-884c18e0-9f52-4f91-af71-48ac7ed69385 node DatanodeRegistration(127.0.0.1:37913, datanodeUuid=87986298-ac6a-4a43-8071-71035f579b6e, infoPort=41707, infoSecurePort=0, ipcPort=38679, storageInfo=lv=-57;cid=testClusterID;nsid=886755429;c=1731403929408), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-12T09:32:13,356 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf 2024-11-12T09:32:13,419 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable
2024-11-12T09:32:13,473 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=157, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=222, ProcessCount=11, AvailableMemoryMB=7918
2024-11-12T09:32:13,475 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-12T09:32:13,482 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS
2024-11-12T09:32:13,546 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/cluster_47d7e28a-ae76-71a6-6eff-9980c6c1d183/zookeeper_0, clientPort=63479, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/cluster_47d7e28a-ae76-71a6-6eff-9980c6c1d183/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/cluster_47d7e28a-ae76-71a6-6eff-9980c6c1d183/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-12T09:32:13,555 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63479
2024-11-12T09:32:13,565 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-12T09:32:13,568 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-12T09:32:13,660 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-12T09:32:13,661 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-12T09:32:13,707 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2116788459_22 at /127.0.0.1:34556 [Receiving block BP-820054393-172.17.0.2-1731403929408:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:37913:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34556 dst: /127.0.0.1:37913
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T09:32:13,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37913 is added to blk_-9223372036854775792_1002 (size=7)
2024-11-12T09:32:14,125 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-12T09:32:14,133 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68 with version=8
2024-11-12T09:32:14,133 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/hbase-staging
2024-11-12T09:32:14,221 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-11-12T09:32:14,455 INFO [Time-limited test {}] client.ConnectionUtils(128): master/106923ea030f:0 server-side Connection retries=45
2024-11-12T09:32:14,463 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-12T09:32:14,464 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-12T09:32:14,470 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-12T09:32:14,470 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-12T09:32:14,470 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-12T09:32:14,618 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-11-12T09:32:14,680 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class
org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-12T09:32:14,690 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-12T09:32:14,694 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T09:32:14,721 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 95155 (auto-detected) 2024-11-12T09:32:14,722 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-12T09:32:14,740 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33601 2024-11-12T09:32:14,759 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33601 connecting to ZooKeeper ensemble=127.0.0.1:63479 2024-11-12T09:32:14,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:336010x0, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T09:32:14,894 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33601-0x1012e6363050000 connected 2024-11-12T09:32:14,999 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T09:32:15,002 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T09:32:15,013 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T09:32:15,016 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68, hbase.cluster.distributed=false 2024-11-12T09:32:15,038 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T09:32:15,043 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33601 2024-11-12T09:32:15,044 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33601 2024-11-12T09:32:15,044 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33601 2024-11-12T09:32:15,047 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33601 2024-11-12T09:32:15,047 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33601 2024-11-12T09:32:15,145 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/106923ea030f:0 server-side Connection retries=45 2024-11-12T09:32:15,146 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T09:32:15,146 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T09:32:15,147 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T09:32:15,147 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T09:32:15,147 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T09:32:15,149 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T09:32:15,152 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T09:32:15,153 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39993 2024-11-12T09:32:15,154 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39993 connecting to ZooKeeper ensemble=127.0.0.1:63479 2024-11-12T09:32:15,155 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T09:32:15,160 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T09:32:15,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:399930x0, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T09:32:15,177 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:399930x0, quorum=127.0.0.1:63479, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T09:32:15,177 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39993-0x1012e6363050001 connected 2024-11-12T09:32:15,181 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-12T09:32:15,188 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-12T09:32:15,191 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39993-0x1012e6363050001, quorum=127.0.0.1:63479, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-12T09:32:15,196 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39993-0x1012e6363050001, quorum=127.0.0.1:63479, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T09:32:15,197 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39993 2024-11-12T09:32:15,197 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39993 
2024-11-12T09:32:15,198 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39993 2024-11-12T09:32:15,199 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39993 2024-11-12T09:32:15,199 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39993 2024-11-12T09:32:15,216 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/106923ea030f:0 server-side Connection retries=45 2024-11-12T09:32:15,216 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T09:32:15,217 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T09:32:15,217 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T09:32:15,217 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T09:32:15,217 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T09:32:15,218 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T09:32:15,218 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T09:32:15,220 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37187 2024-11-12T09:32:15,222 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37187 connecting to ZooKeeper ensemble=127.0.0.1:63479 2024-11-12T09:32:15,223 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T09:32:15,229 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T09:32:15,246 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:371870x0, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T09:32:15,247 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37187-0x1012e6363050002 connected 2024-11-12T09:32:15,247 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37187-0x1012e6363050002, quorum=127.0.0.1:63479, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T09:32:15,248 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, 
blockSize=64 KB 2024-11-12T09:32:15,251 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-12T09:32:15,253 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37187-0x1012e6363050002, quorum=127.0.0.1:63479, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-12T09:32:15,255 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37187-0x1012e6363050002, quorum=127.0.0.1:63479, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T09:32:15,256 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37187 2024-11-12T09:32:15,256 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37187 2024-11-12T09:32:15,257 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37187 2024-11-12T09:32:15,258 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37187 2024-11-12T09:32:15,258 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37187 2024-11-12T09:32:15,276 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/106923ea030f:0 server-side Connection retries=45 2024-11-12T09:32:15,276 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T09:32:15,277 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T09:32:15,277 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T09:32:15,277 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T09:32:15,277 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T09:32:15,278 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T09:32:15,278 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T09:32:15,279 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34819 2024-11-12T09:32:15,281 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34819 connecting to ZooKeeper ensemble=127.0.0.1:63479 2024-11-12T09:32:15,282 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T09:32:15,285 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T09:32:15,302 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:348190x0, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T09:32:15,303 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34819-0x1012e6363050003 connected 2024-11-12T09:32:15,303 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34819-0x1012e6363050003, quorum=127.0.0.1:63479, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T09:32:15,303 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-12T09:32:15,304 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-12T09:32:15,305 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34819-0x1012e6363050003, quorum=127.0.0.1:63479, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-12T09:32:15,307 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34819-0x1012e6363050003, quorum=127.0.0.1:63479, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T09:32:15,308 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34819 2024-11-12T09:32:15,308 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34819 2024-11-12T09:32:15,309 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34819 2024-11-12T09:32:15,309 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34819 2024-11-12T09:32:15,310 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34819 2024-11-12T09:32:15,324 DEBUG [M:0;106923ea030f:33601 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;106923ea030f:33601 2024-11-12T09:32:15,326 INFO [master/106923ea030f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/106923ea030f,33601,1731403934303 2024-11-12T09:32:15,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x1012e6363050001, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T09:32:15,341 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1012e6363050002, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T09:32:15,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
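The entries above show three region servers binding their Netty RPC servers (ports 39993, 37187, 34819) and joining the ZooKeeper ensemble at 127.0.0.1:63479. For context, here is a minimal Java sketch of how a client could point at this mini-cluster and list the live servers; the quorum address and client port are taken from the log, while the class name and the use of the Admin API are illustrative assumptions and not part of the test itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MiniClusterClientSketch {
    public static void main(String[] args) throws Exception {
        // Quorum and client port as reported in the log above (hypothetical client, not from the test).
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 63479);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Should list the three region servers started above (ports 39993, 37187, 34819).
            admin.getClusterMetrics().getServersName().forEach(System.out::println);
        }
    }
}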
2024-11-12T09:32:15,341 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34819-0x1012e6363050003, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T09:32:15,344 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/106923ea030f,33601,1731403934303 2024-11-12T09:32:15,375 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1012e6363050002, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T09:32:15,375 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34819-0x1012e6363050003, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T09:32:15,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:15,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x1012e6363050001, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T09:32:15,375 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1012e6363050002, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:15,376 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34819-0x1012e6363050003, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:15,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x1012e6363050001, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:15,376 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-12T09:32:15,378 INFO [master/106923ea030f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/106923ea030f,33601,1731403934303 from backup master directory 2024-11-12T09:32:15,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/106923ea030f,33601,1731403934303 2024-11-12T09:32:15,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x1012e6363050001, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T09:32:15,386 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34819-0x1012e6363050003, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
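The watcher traffic above (NodeCreated on /hbase/master, NodeChildrenChanged on /hbase/backup-masters) is the usual active-master election pattern: each participant sets a watch on the master and backup-master znodes and reacts when they change. Below is a small sketch using the plain ZooKeeper client against the same quorum; the znode paths come from the log, everything else is an illustrative assumption.

import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class MasterZNodeWatchSketch {
    public static void main(String[] args) throws Exception {
        Watcher watcher = (WatchedEvent event) ->
                System.out.println("event=" + event.getType() + " path=" + event.getPath());
        ZooKeeper zk = new ZooKeeper("127.0.0.1:63479", 30_000, watcher);
        // Watch the active-master znode and the backup-masters children, as the servers above do.
        zk.exists("/hbase/master", true);
        List<String> backups = zk.getChildren("/hbase/backup-masters", true);
        System.out.println("backup masters: " + backups);
        Thread.sleep(5_000); // give watch callbacks a moment to fire in this toy example
        zk.close();
    }
}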
2024-11-12T09:32:15,386 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1012e6363050002, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T09:32:15,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T09:32:15,387 WARN [master/106923ea030f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-12T09:32:15,387 INFO [master/106923ea030f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=106923ea030f,33601,1731403934303 2024-11-12T09:32:15,389 INFO [master/106923ea030f:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-12T09:32:15,390 INFO [master/106923ea030f:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-12T09:32:15,450 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/hbase.id] with ID: 17386135-fc15-4e2f-9d55-bb3637c4a8e2 2024-11-12T09:32:15,450 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/.tmp/hbase.id 2024-11-12T09:32:15,457 WARN [master/106923ea030f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T09:32:15,457 WARN [master/106923ea030f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T09:32:15,460 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2116788459_22 at /127.0.0.1:34584 [Receiving block BP-820054393-172.17.0.2-1731403929408:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:37913:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34584 dst: /127.0.0.1:37913 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T09:32:15,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37913 is added to blk_-9223372036854775776_1004 (size=42) 2024-11-12T09:32:15,467 WARN [master/106923ea030f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T09:32:15,467 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/.tmp/hbase.id]:[hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/hbase.id] 2024-11-12T09:32:15,512 INFO [master/106923ea030f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T09:32:15,517 INFO [master/106923ea030f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-12T09:32:15,535 INFO [master/106923ea030f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 16ms. 2024-11-12T09:32:15,544 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34819-0x1012e6363050003, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:15,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:15,544 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1012e6363050002, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:15,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x1012e6363050001, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:15,559 WARN [master/106923ea030f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T09:32:15,559 WARN [master/106923ea030f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T09:32:15,566 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2116788459_22 at /127.0.0.1:34602 [Receiving block BP-820054393-172.17.0.2-1731403929408:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:37913:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34602 dst: /127.0.0.1:37913 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T09:32:15,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37913 is added to blk_-9223372036854775760_1006 (size=196) 2024-11-12T09:32:15,573 WARN [master/106923ea030f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
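The repeated "Cannot allocate parity block(index=3, policy=RS-3-2-1024k)" / "(index=4, ...)" warnings are expected in this setup: RS-3-2 needs five storage targets (three data plus two parity), but only three datanodes (127.0.0.1:37913, :36449, :35337) are running, so the last two block indices cannot be placed. Besides the 'hdfs ec -verifyClusterSetup' check the message itself suggests, the following is a hedged Java sketch for inspecting and clearing the erasure coding policy on the test directory; the NameNode address appears elsewhere in this log, the directory path is shortened, and whether unsetting the policy is appropriate depends on the test's intent.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class ErasureCodingPolicySketch {
    public static void main(String[] args) throws Exception {
        // NameNode address as reported elsewhere in this log; the directory is an assumption.
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:37231");
        try (FileSystem fs = FileSystem.get(conf)) {
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            Path dir = new Path("/user/jenkins/test-data");
            System.out.println("EC policy on " + dir + ": " + dfs.getErasureCodingPolicy(dir));
            // With fewer than 5 datanodes, falling back to plain replication avoids these warnings.
            dfs.unsetErasureCodingPolicy(dir);
        }
    }
}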
2024-11-12T09:32:15,588 INFO [master/106923ea030f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T09:32:15,590 INFO [master/106923ea030f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-12T09:32:15,595 INFO [master/106923ea030f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-12T09:32:15,620 WARN [master/106923ea030f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T09:32:15,620 WARN [master/106923ea030f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T09:32:15,623 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2116788459_22 at /127.0.0.1:49756 [Receiving block BP-820054393-172.17.0.2-1731403929408:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:36449:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49756 dst: /127.0.0.1:36449 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T09:32:15,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36449 is added to blk_-9223372036854775744_1008 (size=1189) 2024-11-12T09:32:15,629 WARN [master/106923ea030f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T09:32:15,646 INFO [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/MasterData/data/master/store 2024-11-12T09:32:15,662 WARN [master/106923ea030f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T09:32:15,663 WARN [master/106923ea030f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T09:32:15,665 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2116788459_22 at /127.0.0.1:60866 [Receiving block BP-820054393-172.17.0.2-1731403929408:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35337:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60866 dst: /127.0.0.1:35337 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T09:32:15,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35337 is added to blk_-9223372036854775728_1010 (size=34) 2024-11-12T09:32:15,671 WARN [master/106923ea030f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T09:32:15,675 INFO [master/106923ea030f:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-12T09:32:15,678 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T09:32:15,679 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T09:32:15,679 INFO [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T09:32:15,680 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T09:32:15,681 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-11-12T09:32:15,681 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T09:32:15,682 INFO [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T09:32:15,683 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731403935679Disabling compacts and flushes for region at 1731403935679Disabling writes for close at 1731403935681 (+2 ms)Writing region close event to WAL at 1731403935682 (+1 ms)Closed at 1731403935682 2024-11-12T09:32:15,685 WARN [master/106923ea030f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/MasterData/data/master/store/.initializing 2024-11-12T09:32:15,686 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/MasterData/WALs/106923ea030f,33601,1731403934303 2024-11-12T09:32:15,694 INFO [master/106923ea030f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-12T09:32:15,709 INFO [master/106923ea030f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=106923ea030f%2C33601%2C1731403934303, suffix=, logDir=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/MasterData/WALs/106923ea030f,33601,1731403934303, archiveDir=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/MasterData/oldWALs, maxLogs=10 2024-11-12T09:32:15,736 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/MasterData/WALs/106923ea030f,33601,1731403934303/106923ea030f%2C33601%2C1731403934303.1731403935714, exclude list is [], retry=0 2024-11-12T09:32:15,754 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T09:32:15,756 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37913,DS-d7c065bf-0747-4e22-aa9e-afd0d437e11f,DISK] 2024-11-12T09:32:15,756 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36449,DS-46f738d4-202a-446b-839f-eccbf45ec130,DISK] 2024-11-12T09:32:15,756 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35337,DS-9fae446a-ddae-4ad0-8361-fc3ea4039c7c,DISK] 2024-11-12T09:32:15,759 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-12T09:32:15,796 INFO [master/106923ea030f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/MasterData/WALs/106923ea030f,33601,1731403934303/106923ea030f%2C33601%2C1731403934303.1731403935714 2024-11-12T09:32:15,797 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41707:41707),(127.0.0.1/127.0.0.1:34915:34915),(127.0.0.1/127.0.0.1:37487:37487)] 2024-11-12T09:32:15,798 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-12T09:32:15,798 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T09:32:15,802 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T09:32:15,803 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T09:32:15,837 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T09:32:15,859 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-12T09:32:15,862 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:15,865 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T09:32:15,865 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T09:32:15,869 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-12T09:32:15,869 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:15,870 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T09:32:15,870 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T09:32:15,872 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-12T09:32:15,873 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:15,874 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T09:32:15,874 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T09:32:15,876 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-12T09:32:15,876 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:15,877 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T09:32:15,878 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T09:32:15,881 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T09:32:15,882 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T09:32:15,886 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T09:32:15,887 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T09:32:15,892 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T09:32:15,895 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T09:32:15,901 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T09:32:15,902 INFO [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73259299, jitterRate=0.09164862334728241}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T09:32:15,909 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731403935814Initializing all the Stores at 1731403935816 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731403935817 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731403935818 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731403935818Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731403935818Cleaning up temporary data from old regions at 1731403935887 (+69 ms)Region opened successfully at 1731403935909 (+22 ms) 2024-11-12T09:32:15,911 INFO [master/106923ea030f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-12T09:32:15,943 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f1b2c07, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=106923ea030f/172.17.0.2:0 2024-11-12T09:32:15,969 INFO [master/106923ea030f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-12T09:32:15,979 INFO [master/106923ea030f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-12T09:32:15,979 INFO [master/106923ea030f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-12T09:32:15,981 INFO [master/106923ea030f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-12T09:32:15,983 INFO [master/106923ea030f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-12T09:32:15,987 INFO [master/106923ea030f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-12T09:32:15,987 INFO [master/106923ea030f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-12T09:32:16,010 INFO [master/106923ea030f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-12T09:32:16,019 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-12T09:32:16,070 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-12T09:32:16,075 INFO [master/106923ea030f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-12T09:32:16,077 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-12T09:32:16,088 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-12T09:32:16,090 INFO [master/106923ea030f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-12T09:32:16,093 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-12T09:32:16,101 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-12T09:32:16,103 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-12T09:32:16,112 DEBUG [master/106923ea030f:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-12T09:32:16,133 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-12T09:32:16,143 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-12T09:32:16,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35337 is added to blk_-9223372036854775788_1002 (size=7) 2024-11-12T09:32:16,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36449 is added to blk_-9223372036854775789_1002 (size=7) 2024-11-12T09:32:16,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T09:32:16,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x1012e6363050001, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T09:32:16,154 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1012e6363050002, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T09:32:16,154 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34819-0x1012e6363050003, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T09:32:16,154 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34819-0x1012e6363050003, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:16,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x1012e6363050001, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:16,155 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1012e6363050002, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:16,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:16,158 INFO [master/106923ea030f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=106923ea030f,33601,1731403934303, sessionid=0x1012e6363050000, setting cluster-up flag (Was=false) 2024-11-12T09:32:16,186 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34819-0x1012e6363050003, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:16,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:16,186 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1012e6363050002, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:16,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x1012e6363050001, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:16,218 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-12T09:32:16,223 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=106923ea030f,33601,1731403934303 2024-11-12T09:32:16,249 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34819-0x1012e6363050003, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:16,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:16,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x1012e6363050001, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:16,249 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1012e6363050002, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:16,280 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-12T09:32:16,284 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=106923ea030f,33601,1731403934303 2024-11-12T09:32:16,292 INFO [master/106923ea030f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-12T09:32:16,313 INFO [RS:0;106923ea030f:39993 {}] regionserver.HRegionServer(746): ClusterId : 17386135-fc15-4e2f-9d55-bb3637c4a8e2 2024-11-12T09:32:16,313 INFO [RS:1;106923ea030f:37187 {}] regionserver.HRegionServer(746): ClusterId : 17386135-fc15-4e2f-9d55-bb3637c4a8e2 2024-11-12T09:32:16,313 INFO [RS:2;106923ea030f:34819 {}] regionserver.HRegionServer(746): ClusterId : 17386135-fc15-4e2f-9d55-bb3637c4a8e2 2024-11-12T09:32:16,316 DEBUG [RS:1;106923ea030f:37187 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T09:32:16,316 DEBUG [RS:2;106923ea030f:34819 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T09:32:16,316 DEBUG [RS:0;106923ea030f:39993 {}] 
procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T09:32:16,332 DEBUG [RS:2;106923ea030f:34819 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T09:32:16,332 DEBUG [RS:1;106923ea030f:37187 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T09:32:16,332 DEBUG [RS:0;106923ea030f:39993 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T09:32:16,332 DEBUG [RS:2;106923ea030f:34819 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T09:32:16,332 DEBUG [RS:1;106923ea030f:37187 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T09:32:16,332 DEBUG [RS:0;106923ea030f:39993 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T09:32:16,355 DEBUG [RS:1;106923ea030f:37187 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T09:32:16,355 DEBUG [RS:2;106923ea030f:34819 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T09:32:16,355 DEBUG [RS:0;106923ea030f:39993 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T09:32:16,356 DEBUG [RS:1;106923ea030f:37187 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3233aa62, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=106923ea030f/172.17.0.2:0 2024-11-12T09:32:16,356 DEBUG [RS:0;106923ea030f:39993 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12e9aa63, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=106923ea030f/172.17.0.2:0 2024-11-12T09:32:16,356 DEBUG [RS:2;106923ea030f:34819 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d1d8ad1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=106923ea030f/172.17.0.2:0 2024-11-12T09:32:16,361 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-12T09:32:16,369 DEBUG [RS:1;106923ea030f:37187 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;106923ea030f:37187 2024-11-12T09:32:16,372 INFO [RS:1;106923ea030f:37187 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-12T09:32:16,372 INFO [RS:1;106923ea030f:37187 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-12T09:32:16,372 DEBUG [RS:1;106923ea030f:37187 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-12T09:32:16,373 DEBUG [RS:0;106923ea030f:39993 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;106923ea030f:39993 2024-11-12T09:32:16,373 DEBUG [RS:2;106923ea030f:34819 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;106923ea030f:34819 2024-11-12T09:32:16,373 INFO [master/106923ea030f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-12T09:32:16,374 INFO [RS:2;106923ea030f:34819 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-12T09:32:16,374 INFO [RS:0;106923ea030f:39993 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-12T09:32:16,374 INFO [RS:2;106923ea030f:34819 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-12T09:32:16,374 INFO [RS:0;106923ea030f:39993 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-12T09:32:16,374 DEBUG [RS:2;106923ea030f:34819 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-12T09:32:16,374 DEBUG [RS:0;106923ea030f:39993 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-12T09:32:16,374 INFO [RS:1;106923ea030f:37187 {}] regionserver.HRegionServer(2659): reportForDuty to master=106923ea030f,33601,1731403934303 with port=37187, startcode=1731403935216 2024-11-12T09:32:16,375 INFO [RS:2;106923ea030f:34819 {}] regionserver.HRegionServer(2659): reportForDuty to master=106923ea030f,33601,1731403934303 with port=34819, startcode=1731403935275 2024-11-12T09:32:16,375 INFO [RS:0;106923ea030f:39993 {}] regionserver.HRegionServer(2659): reportForDuty to master=106923ea030f,33601,1731403934303 with port=39993, startcode=1731403935112 2024-11-12T09:32:16,381 INFO [master/106923ea030f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-12T09:32:16,385 DEBUG [RS:2;106923ea030f:34819 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T09:32:16,385 DEBUG [RS:1;106923ea030f:37187 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T09:32:16,385 DEBUG [RS:0;106923ea030f:39993 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T09:32:16,386 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 106923ea030f,33601,1731403934303 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-12T09:32:16,393 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/106923ea030f:0, corePoolSize=5, maxPoolSize=5 2024-11-12T09:32:16,393 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/106923ea030f:0, corePoolSize=5, maxPoolSize=5 2024-11-12T09:32:16,393 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/106923ea030f:0, corePoolSize=5, maxPoolSize=5 2024-11-12T09:32:16,393 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/106923ea030f:0, corePoolSize=5, maxPoolSize=5 2024-11-12T09:32:16,393 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/106923ea030f:0, corePoolSize=10, maxPoolSize=10 2024-11-12T09:32:16,394 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,394 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/106923ea030f:0, corePoolSize=2, maxPoolSize=2 2024-11-12T09:32:16,394 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,396 INFO [master/106923ea030f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731403966396 2024-11-12T09:32:16,398 INFO [master/106923ea030f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-12T09:32:16,399 INFO [master/106923ea030f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-12T09:32:16,402 INFO [master/106923ea030f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-12T09:32:16,402 INFO [master/106923ea030f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-12T09:32:16,403 INFO [master/106923ea030f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-12T09:32:16,403 INFO [master/106923ea030f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-12T09:32:16,403 INFO [master/106923ea030f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,407 INFO [master/106923ea030f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-12T09:32:16,408 INFO [master/106923ea030f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-12T09:32:16,408 INFO [master/106923ea030f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-12T09:32:16,410 INFO [master/106923ea030f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-12T09:32:16,411 INFO [master/106923ea030f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-12T09:32:16,411 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T09:32:16,412 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-12T09:32:16,416 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/106923ea030f:0:becomeActiveMaster-HFileCleaner.large.0-1731403936413,5,FailOnTimeoutGroup] 2024-11-12T09:32:16,418 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:16,419 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/106923ea030f:0:becomeActiveMaster-HFileCleaner.small.0-1731403936416,5,FailOnTimeoutGroup] 2024-11-12T09:32:16,419 INFO [master/106923ea030f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-12T09:32:16,419 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-12T09:32:16,419 INFO [master/106923ea030f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-12T09:32:16,421 INFO [master/106923ea030f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,422 INFO [master/106923ea030f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,423 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38733, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T09:32:16,423 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56641, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T09:32:16,423 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53215, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T09:32:16,433 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33601 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 106923ea030f,37187,1731403935216 2024-11-12T09:32:16,435 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-12T09:32:16,435 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-12T09:32:16,436 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33601 {}] master.ServerManager(517): Registering regionserver=106923ea030f,37187,1731403935216
2024-11-12T09:32:16,446 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2116788459_22 at /127.0.0.1:60912 [Receiving block BP-820054393-172.17.0.2-1731403929408:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:35337:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60912 dst: /127.0.0.1:35337
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T09:32:16,451 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33601 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 106923ea030f,34819,1731403935275 2024-11-12T09:32:16,451 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33601 {}] master.ServerManager(517): Registering regionserver=106923ea030f,34819,1731403935275 2024-11-12T09:32:16,455 DEBUG [RS:1;106923ea030f:37187 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68 2024-11-12T09:32:16,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35337 is added to blk_-9223372036854775712_1013 (size=1321) 2024-11-12T09:32:16,455 DEBUG [RS:1;106923ea030f:37187 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37231 2024-11-12T09:32:16,455 DEBUG [RS:1;106923ea030f:37187 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-12T09:32:16,456 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33601 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 106923ea030f,39993,1731403935112 2024-11-12T09:32:16,457 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33601 {}] master.ServerManager(517): Registering regionserver=106923ea030f,39993,1731403935112 2024-11-12T09:32:16,457 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T09:32:16,457 DEBUG [RS:2;106923ea030f:34819 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68 2024-11-12T09:32:16,457 DEBUG [RS:2;106923ea030f:34819 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37231 2024-11-12T09:32:16,457 DEBUG [RS:2;106923ea030f:34819 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-12T09:32:16,458 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-12T09:32:16,459 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 
'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68
2024-11-12T09:32:16,463 DEBUG [RS:0;106923ea030f:39993 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68
2024-11-12T09:32:16,464 DEBUG [RS:0;106923ea030f:39993 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37231
2024-11-12T09:32:16,464 DEBUG [RS:0;106923ea030f:39993 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-12T09:32:16,474 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-12T09:32:16,474 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-12T09:32:16,477 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2116788459_22 at /127.0.0.1:60936 [Receiving block BP-820054393-172.17.0.2-1731403929408:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:35337:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60936 dst: /127.0.0.1:35337
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T09:32:16,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35337 is added to blk_-9223372036854775696_1015 (size=32) 2024-11-12T09:32:16,487 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T09:32:16,488 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T09:32:16,492 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T09:32:16,495 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T09:32:16,495 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:16,496 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T09:32:16,497 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-12T09:32:16,500 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-12T09:32:16,500 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:16,502 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, 
storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T09:32:16,502 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T09:32:16,505 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T09:32:16,505 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:16,507 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T09:32:16,507 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T09:32:16,510 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-12T09:32:16,510 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:16,511 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T09:32:16,512 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T09:32:16,513 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/hbase/meta/1588230740 2024-11-12T09:32:16,515 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/hbase/meta/1588230740 2024-11-12T09:32:16,518 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T09:32:16,518 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T09:32:16,519 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T09:32:16,522 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T09:32:16,527 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T09:32:16,534 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T09:32:16,535 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60431417, jitterRate=-0.09950171411037445}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T09:32:16,539 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731403936489Initializing all the Stores at 1731403936491 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731403936491Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731403936491Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731403936491Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731403936491Cleaning up temporary data from old regions at 1731403936518 (+27 ms)Region 
opened successfully at 1731403936539 (+21 ms) 2024-11-12T09:32:16,539 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-12T09:32:16,539 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-12T09:32:16,539 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-12T09:32:16,539 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T09:32:16,539 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T09:32:16,541 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-12T09:32:16,541 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731403936539Disabling compacts and flushes for region at 1731403936539Disabling writes for close at 1731403936539Writing region close event to WAL at 1731403936540 (+1 ms)Closed at 1731403936541 (+1 ms) 2024-11-12T09:32:16,544 DEBUG [RS:1;106923ea030f:37187 {}] zookeeper.ZKUtil(111): regionserver:37187-0x1012e6363050002, quorum=127.0.0.1:63479, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/106923ea030f,37187,1731403935216 2024-11-12T09:32:16,544 DEBUG [RS:2;106923ea030f:34819 {}] zookeeper.ZKUtil(111): regionserver:34819-0x1012e6363050003, quorum=127.0.0.1:63479, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/106923ea030f,34819,1731403935275 2024-11-12T09:32:16,544 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T09:32:16,544 WARN [RS:2;106923ea030f:34819 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-12T09:32:16,544 WARN [RS:1;106923ea030f:37187 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-12T09:32:16,544 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-12T09:32:16,544 INFO [RS:2;106923ea030f:34819 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-12T09:32:16,544 INFO [RS:1;106923ea030f:37187 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-12T09:32:16,544 DEBUG [RS:0;106923ea030f:39993 {}] zookeeper.ZKUtil(111): regionserver:39993-0x1012e6363050001, quorum=127.0.0.1:63479, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/106923ea030f,39993,1731403935112 2024-11-12T09:32:16,545 DEBUG [RS:2;106923ea030f:34819 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/WALs/106923ea030f,34819,1731403935275 2024-11-12T09:32:16,545 WARN [RS:0;106923ea030f:39993 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-12T09:32:16,545 DEBUG [RS:1;106923ea030f:37187 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/WALs/106923ea030f,37187,1731403935216 2024-11-12T09:32:16,545 INFO [RS:0;106923ea030f:39993 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-12T09:32:16,545 DEBUG [RS:0;106923ea030f:39993 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/WALs/106923ea030f,39993,1731403935112 2024-11-12T09:32:16,546 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [106923ea030f,37187,1731403935216] 2024-11-12T09:32:16,546 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [106923ea030f,34819,1731403935275] 2024-11-12T09:32:16,546 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [106923ea030f,39993,1731403935112] 2024-11-12T09:32:16,553 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-12T09:32:16,564 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-12T09:32:16,569 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-12T09:32:16,576 INFO [RS:2;106923ea030f:34819 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T09:32:16,576 INFO [RS:0;106923ea030f:39993 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T09:32:16,576 INFO [RS:1;106923ea030f:37187 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T09:32:16,592 INFO [RS:2;106923ea030f:34819 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T09:32:16,592 INFO [RS:0;106923ea030f:39993 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T09:32:16,592 INFO [RS:1;106923ea030f:37187 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T09:32:16,599 INFO [RS:0;106923ea030f:39993 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T09:32:16,599 INFO [RS:1;106923ea030f:37187 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T09:32:16,599 INFO [RS:2;106923ea030f:34819 {}] 
throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T09:32:16,599 INFO [RS:2;106923ea030f:34819 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,600 INFO [RS:0;106923ea030f:39993 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,600 INFO [RS:1;106923ea030f:37187 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,601 INFO [RS:2;106923ea030f:34819 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T09:32:16,601 INFO [RS:1;106923ea030f:37187 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T09:32:16,601 INFO [RS:0;106923ea030f:39993 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T09:32:16,607 INFO [RS:1;106923ea030f:37187 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T09:32:16,607 INFO [RS:2;106923ea030f:34819 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T09:32:16,607 INFO [RS:0;106923ea030f:39993 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T09:32:16,609 INFO [RS:2;106923ea030f:34819 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,609 INFO [RS:1;106923ea030f:37187 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,609 INFO [RS:0;106923ea030f:39993 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-12T09:32:16,610 DEBUG [RS:2;106923ea030f:34819 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,610 DEBUG [RS:1;106923ea030f:37187 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,610 DEBUG [RS:0;106923ea030f:39993 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,610 DEBUG [RS:2;106923ea030f:34819 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,610 DEBUG [RS:0;106923ea030f:39993 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,610 DEBUG [RS:1;106923ea030f:37187 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,610 DEBUG [RS:2;106923ea030f:34819 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,610 DEBUG [RS:0;106923ea030f:39993 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,610 DEBUG [RS:1;106923ea030f:37187 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,610 DEBUG [RS:2;106923ea030f:34819 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,610 DEBUG [RS:0;106923ea030f:39993 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,610 DEBUG [RS:1;106923ea030f:37187 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,610 DEBUG [RS:2;106923ea030f:34819 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,610 DEBUG [RS:0;106923ea030f:39993 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,610 DEBUG [RS:1;106923ea030f:37187 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,610 DEBUG [RS:2;106923ea030f:34819 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/106923ea030f:0, corePoolSize=2, maxPoolSize=2 2024-11-12T09:32:16,610 DEBUG [RS:1;106923ea030f:37187 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/106923ea030f:0, corePoolSize=2, maxPoolSize=2 2024-11-12T09:32:16,610 DEBUG [RS:0;106923ea030f:39993 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/106923ea030f:0, corePoolSize=2, maxPoolSize=2 2024-11-12T09:32:16,610 DEBUG 
[RS:2;106923ea030f:34819 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,610 DEBUG [RS:1;106923ea030f:37187 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,610 DEBUG [RS:0;106923ea030f:39993 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,611 DEBUG [RS:2;106923ea030f:34819 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,611 DEBUG [RS:1;106923ea030f:37187 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,611 DEBUG [RS:0;106923ea030f:39993 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,611 DEBUG [RS:2;106923ea030f:34819 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,611 DEBUG [RS:1;106923ea030f:37187 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,611 DEBUG [RS:0;106923ea030f:39993 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,611 DEBUG [RS:2;106923ea030f:34819 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,611 DEBUG [RS:0;106923ea030f:39993 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,611 DEBUG [RS:1;106923ea030f:37187 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,611 DEBUG [RS:2;106923ea030f:34819 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,611 DEBUG [RS:0;106923ea030f:39993 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,611 DEBUG [RS:1;106923ea030f:37187 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,611 DEBUG [RS:2;106923ea030f:34819 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,611 DEBUG [RS:1;106923ea030f:37187 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,611 DEBUG [RS:0;106923ea030f:39993 {}] executor.ExecutorService(95): Starting 
executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:16,611 DEBUG [RS:0;106923ea030f:39993 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/106923ea030f:0, corePoolSize=3, maxPoolSize=3 2024-11-12T09:32:16,611 DEBUG [RS:1;106923ea030f:37187 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/106923ea030f:0, corePoolSize=3, maxPoolSize=3 2024-11-12T09:32:16,611 DEBUG [RS:2;106923ea030f:34819 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/106923ea030f:0, corePoolSize=3, maxPoolSize=3 2024-11-12T09:32:16,611 DEBUG [RS:2;106923ea030f:34819 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0, corePoolSize=3, maxPoolSize=3 2024-11-12T09:32:16,611 DEBUG [RS:1;106923ea030f:37187 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0, corePoolSize=3, maxPoolSize=3 2024-11-12T09:32:16,611 DEBUG [RS:0;106923ea030f:39993 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0, corePoolSize=3, maxPoolSize=3 2024-11-12T09:32:16,616 INFO [RS:1;106923ea030f:37187 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,617 INFO [RS:1;106923ea030f:37187 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,617 INFO [RS:1;106923ea030f:37187 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,617 INFO [RS:1;106923ea030f:37187 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,617 INFO [RS:1;106923ea030f:37187 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,617 INFO [RS:1;106923ea030f:37187 {}] hbase.ChoreService(168): Chore ScheduledChore name=106923ea030f,37187,1731403935216-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T09:32:16,617 INFO [RS:0;106923ea030f:39993 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,617 INFO [RS:0;106923ea030f:39993 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,617 INFO [RS:0;106923ea030f:39993 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,618 INFO [RS:0;106923ea030f:39993 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,618 INFO [RS:0;106923ea030f:39993 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,618 INFO [RS:0;106923ea030f:39993 {}] hbase.ChoreService(168): Chore ScheduledChore name=106923ea030f,39993,1731403935112-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-11-12T09:32:16,618 INFO [RS:2;106923ea030f:34819 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,619 INFO [RS:2;106923ea030f:34819 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,619 INFO [RS:2;106923ea030f:34819 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,619 INFO [RS:2;106923ea030f:34819 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,619 INFO [RS:2;106923ea030f:34819 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,619 INFO [RS:2;106923ea030f:34819 {}] hbase.ChoreService(168): Chore ScheduledChore name=106923ea030f,34819,1731403935275-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T09:32:16,637 INFO [RS:1;106923ea030f:37187 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T09:32:16,637 INFO [RS:2;106923ea030f:34819 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T09:32:16,639 INFO [RS:1;106923ea030f:37187 {}] hbase.ChoreService(168): Chore ScheduledChore name=106923ea030f,37187,1731403935216-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,639 INFO [RS:2;106923ea030f:34819 {}] hbase.ChoreService(168): Chore ScheduledChore name=106923ea030f,34819,1731403935275-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,639 INFO [RS:1;106923ea030f:37187 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,639 INFO [RS:2;106923ea030f:34819 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,639 INFO [RS:0;106923ea030f:39993 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T09:32:16,639 INFO [RS:1;106923ea030f:37187 {}] regionserver.Replication(171): 106923ea030f,37187,1731403935216 started 2024-11-12T09:32:16,639 INFO [RS:2;106923ea030f:34819 {}] regionserver.Replication(171): 106923ea030f,34819,1731403935275 started 2024-11-12T09:32:16,639 INFO [RS:0;106923ea030f:39993 {}] hbase.ChoreService(168): Chore ScheduledChore name=106923ea030f,39993,1731403935112-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,639 INFO [RS:0;106923ea030f:39993 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,640 INFO [RS:0;106923ea030f:39993 {}] regionserver.Replication(171): 106923ea030f,39993,1731403935112 started 2024-11-12T09:32:16,659 INFO [RS:1;106923ea030f:37187 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-12T09:32:16,659 INFO [RS:1;106923ea030f:37187 {}] regionserver.HRegionServer(1482): Serving as 106923ea030f,37187,1731403935216, RpcServer on 106923ea030f/172.17.0.2:37187, sessionid=0x1012e6363050002 2024-11-12T09:32:16,660 DEBUG [RS:1;106923ea030f:37187 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T09:32:16,660 DEBUG [RS:1;106923ea030f:37187 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 106923ea030f,37187,1731403935216 2024-11-12T09:32:16,660 DEBUG [RS:1;106923ea030f:37187 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '106923ea030f,37187,1731403935216' 2024-11-12T09:32:16,660 DEBUG [RS:1;106923ea030f:37187 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T09:32:16,661 DEBUG [RS:1;106923ea030f:37187 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T09:32:16,661 INFO [RS:0;106923ea030f:39993 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:16,662 INFO [RS:0;106923ea030f:39993 {}] regionserver.HRegionServer(1482): Serving as 106923ea030f,39993,1731403935112, RpcServer on 106923ea030f/172.17.0.2:39993, sessionid=0x1012e6363050001 2024-11-12T09:32:16,662 DEBUG [RS:0;106923ea030f:39993 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T09:32:16,662 DEBUG [RS:0;106923ea030f:39993 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 106923ea030f,39993,1731403935112 2024-11-12T09:32:16,662 DEBUG [RS:1;106923ea030f:37187 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T09:32:16,662 DEBUG [RS:0;106923ea030f:39993 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '106923ea030f,39993,1731403935112' 2024-11-12T09:32:16,662 DEBUG [RS:1;106923ea030f:37187 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T09:32:16,662 DEBUG [RS:0;106923ea030f:39993 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T09:32:16,662 DEBUG [RS:1;106923ea030f:37187 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 106923ea030f,37187,1731403935216 2024-11-12T09:32:16,662 DEBUG [RS:1;106923ea030f:37187 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '106923ea030f,37187,1731403935216' 2024-11-12T09:32:16,662 DEBUG [RS:1;106923ea030f:37187 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T09:32:16,662 INFO [RS:2;106923ea030f:34819 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-12T09:32:16,663 INFO [RS:2;106923ea030f:34819 {}] regionserver.HRegionServer(1482): Serving as 106923ea030f,34819,1731403935275, RpcServer on 106923ea030f/172.17.0.2:34819, sessionid=0x1012e6363050003 2024-11-12T09:32:16,663 DEBUG [RS:0;106923ea030f:39993 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T09:32:16,663 DEBUG [RS:2;106923ea030f:34819 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T09:32:16,663 DEBUG [RS:2;106923ea030f:34819 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 106923ea030f,34819,1731403935275 2024-11-12T09:32:16,663 DEBUG [RS:1;106923ea030f:37187 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T09:32:16,663 DEBUG [RS:2;106923ea030f:34819 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '106923ea030f,34819,1731403935275' 2024-11-12T09:32:16,663 DEBUG [RS:2;106923ea030f:34819 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T09:32:16,663 DEBUG [RS:0;106923ea030f:39993 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T09:32:16,663 DEBUG [RS:1;106923ea030f:37187 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T09:32:16,663 DEBUG [RS:0;106923ea030f:39993 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T09:32:16,663 DEBUG [RS:0;106923ea030f:39993 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 106923ea030f,39993,1731403935112 2024-11-12T09:32:16,663 INFO [RS:1;106923ea030f:37187 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T09:32:16,664 DEBUG [RS:2;106923ea030f:34819 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T09:32:16,664 DEBUG [RS:0;106923ea030f:39993 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '106923ea030f,39993,1731403935112' 2024-11-12T09:32:16,664 INFO [RS:1;106923ea030f:37187 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-12T09:32:16,664 DEBUG [RS:0;106923ea030f:39993 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T09:32:16,664 DEBUG [RS:2;106923ea030f:34819 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T09:32:16,664 DEBUG [RS:0;106923ea030f:39993 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T09:32:16,664 DEBUG [RS:2;106923ea030f:34819 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T09:32:16,664 DEBUG [RS:2;106923ea030f:34819 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 106923ea030f,34819,1731403935275 2024-11-12T09:32:16,664 DEBUG [RS:2;106923ea030f:34819 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '106923ea030f,34819,1731403935275' 2024-11-12T09:32:16,664 DEBUG [RS:2;106923ea030f:34819 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T09:32:16,665 DEBUG [RS:2;106923ea030f:34819 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T09:32:16,665 DEBUG [RS:0;106923ea030f:39993 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T09:32:16,665 INFO [RS:0;106923ea030f:39993 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T09:32:16,665 INFO [RS:0;106923ea030f:39993 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-12T09:32:16,665 DEBUG [RS:2;106923ea030f:34819 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T09:32:16,666 INFO [RS:2;106923ea030f:34819 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T09:32:16,666 INFO [RS:2;106923ea030f:34819 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-12T09:32:16,719 WARN [106923ea030f:33601 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
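Before each region server reports "Serving as ...", its flush-table-proc and online-snapshot managers join the ZooKeeper-coordinated procedure framework: check the abort znode, then watch the acquired znode for procedures to join, which is exactly what the ZKProcedureMemberRpcs lines above record. Below is a minimal sketch of those two checks against the same znode paths; the quorum address is the one from the log, and the code is illustrative rather than the actual ZKProcedureMemberRpcs implementation.

```java
import java.util.List;
import org.apache.zookeeper.ZooKeeper;

// Sketch of what "checking for aborted procedures" and "looking for new
// procedures" amount to at the ZooKeeper level (paths taken from the log).
public class ProcedureMemberSketch {
    public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:63479", 30000, event -> {});

        // Abort check: any children under /hbase/flush-table-proc/abort?
        List<String> aborted = zk.getChildren("/hbase/flush-table-proc/abort", false);
        System.out.println("aborted procedures: " + aborted);

        // Acquisition check: watch for new procedure znodes to join.
        List<String> acquired = zk.getChildren("/hbase/flush-table-proc/acquired", true);
        System.out.println("procedures to acquire: " + acquired);

        zk.close();
    }
}
```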
2024-11-12T09:32:16,769 INFO [RS:1;106923ea030f:37187 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-12T09:32:16,769 INFO [RS:2;106923ea030f:34819 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-12T09:32:16,769 INFO [RS:0;106923ea030f:39993 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-12T09:32:16,772 INFO [RS:1;106923ea030f:37187 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=106923ea030f%2C37187%2C1731403935216, suffix=, logDir=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/WALs/106923ea030f,37187,1731403935216, archiveDir=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/oldWALs, maxLogs=32 2024-11-12T09:32:16,772 INFO [RS:0;106923ea030f:39993 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=106923ea030f%2C39993%2C1731403935112, suffix=, logDir=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/WALs/106923ea030f,39993,1731403935112, archiveDir=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/oldWALs, maxLogs=32 2024-11-12T09:32:16,772 INFO [RS:2;106923ea030f:34819 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=106923ea030f%2C34819%2C1731403935275, suffix=, logDir=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/WALs/106923ea030f,34819,1731403935275, archiveDir=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/oldWALs, maxLogs=32 2024-11-12T09:32:16,788 DEBUG [RS:1;106923ea030f:37187 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/WALs/106923ea030f,37187,1731403935216/106923ea030f%2C37187%2C1731403935216.1731403936775, exclude list is [], retry=0 2024-11-12T09:32:16,788 DEBUG [RS:0;106923ea030f:39993 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/WALs/106923ea030f,39993,1731403935112/106923ea030f%2C39993%2C1731403935112.1731403936775, exclude list is [], retry=0 2024-11-12T09:32:16,791 DEBUG [RS:2;106923ea030f:34819 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/WALs/106923ea030f,34819,1731403935275/106923ea030f%2C34819%2C1731403935275.1731403936776, exclude list is [], retry=0 2024-11-12T09:32:16,794 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35337,DS-9fae446a-ddae-4ad0-8361-fc3ea4039c7c,DISK] 2024-11-12T09:32:16,794 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37913,DS-d7c065bf-0747-4e22-aa9e-afd0d437e11f,DISK] 2024-11-12T09:32:16,794 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:37913,DS-d7c065bf-0747-4e22-aa9e-afd0d437e11f,DISK] 2024-11-12T09:32:16,795 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35337,DS-9fae446a-ddae-4ad0-8361-fc3ea4039c7c,DISK] 2024-11-12T09:32:16,795 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36449,DS-46f738d4-202a-446b-839f-eccbf45ec130,DISK] 2024-11-12T09:32:16,795 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36449,DS-46f738d4-202a-446b-839f-eccbf45ec130,DISK] 2024-11-12T09:32:16,814 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35337,DS-9fae446a-ddae-4ad0-8361-fc3ea4039c7c,DISK] 2024-11-12T09:32:16,814 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36449,DS-46f738d4-202a-446b-839f-eccbf45ec130,DISK] 2024-11-12T09:32:16,814 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37913,DS-d7c065bf-0747-4e22-aa9e-afd0d437e11f,DISK] 2024-11-12T09:32:16,816 INFO [RS:1;106923ea030f:37187 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/WALs/106923ea030f,37187,1731403935216/106923ea030f%2C37187%2C1731403935216.1731403936775 2024-11-12T09:32:16,819 DEBUG [RS:1;106923ea030f:37187 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41707:41707),(127.0.0.1/127.0.0.1:37487:37487),(127.0.0.1/127.0.0.1:34915:34915)] 2024-11-12T09:32:16,819 INFO [RS:2;106923ea030f:34819 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/WALs/106923ea030f,34819,1731403935275/106923ea030f%2C34819%2C1731403935275.1731403936776 2024-11-12T09:32:16,820 DEBUG [RS:2;106923ea030f:34819 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41707:41707),(127.0.0.1/127.0.0.1:37487:37487),(127.0.0.1/127.0.0.1:34915:34915)] 2024-11-12T09:32:16,820 INFO [RS:0;106923ea030f:39993 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/WALs/106923ea030f,39993,1731403935112/106923ea030f%2C39993%2C1731403935112.1731403936775 2024-11-12T09:32:16,821 DEBUG [RS:0;106923ea030f:39993 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34915:34915),(127.0.0.1/127.0.0.1:41707:41707),(127.0.0.1/127.0.0.1:37487:37487)] 2024-11-12T09:32:16,973 DEBUG [106923ea030f:33601 {}] assignment.AssignmentManager(2464): Processing 
assignQueue; systemServersCount=3, allServersCount=3 2024-11-12T09:32:16,986 DEBUG [106923ea030f:33601 {}] balancer.BalancerClusterState(204): Hosts are {106923ea030f=0} racks are {/default-rack=0} 2024-11-12T09:32:16,994 DEBUG [106923ea030f:33601 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-12T09:32:16,994 DEBUG [106923ea030f:33601 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-12T09:32:16,994 DEBUG [106923ea030f:33601 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-12T09:32:16,994 DEBUG [106923ea030f:33601 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-12T09:32:16,994 DEBUG [106923ea030f:33601 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-12T09:32:16,994 DEBUG [106923ea030f:33601 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-12T09:32:16,994 INFO [106923ea030f:33601 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-12T09:32:16,994 INFO [106923ea030f:33601 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-12T09:32:16,994 INFO [106923ea030f:33601 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-12T09:32:16,995 DEBUG [106923ea030f:33601 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-12T09:32:17,001 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=106923ea030f,34819,1731403935275 2024-11-12T09:32:17,008 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 106923ea030f,34819,1731403935275, state=OPENING 2024-11-12T09:32:17,059 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-12T09:32:17,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:17,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x1012e6363050001, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:17,070 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34819-0x1012e6363050003, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:17,070 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1012e6363050002, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:17,071 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T09:32:17,071 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T09:32:17,071 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T09:32:17,071 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T09:32:17,073 DEBUG [PEWorker-3 
{}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-12T09:32:17,076 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=106923ea030f,34819,1731403935275}] 2024-11-12T09:32:17,259 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-12T09:32:17,261 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53693, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-12T09:32:17,274 INFO [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-12T09:32:17,274 INFO [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-12T09:32:17,274 INFO [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-12T09:32:17,278 INFO [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=106923ea030f%2C34819%2C1731403935275.meta, suffix=.meta, logDir=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/WALs/106923ea030f,34819,1731403935275, archiveDir=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/oldWALs, maxLogs=32 2024-11-12T09:32:17,297 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/WALs/106923ea030f,34819,1731403935275/106923ea030f%2C34819%2C1731403935275.meta.1731403937282.meta, exclude list is [], retry=0 2024-11-12T09:32:17,301 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35337,DS-9fae446a-ddae-4ad0-8361-fc3ea4039c7c,DISK] 2024-11-12T09:32:17,301 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36449,DS-46f738d4-202a-446b-839f-eccbf45ec130,DISK] 2024-11-12T09:32:17,302 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37913,DS-d7c065bf-0747-4e22-aa9e-afd0d437e11f,DISK] 2024-11-12T09:32:17,304 INFO [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/WALs/106923ea030f,34819,1731403935275/106923ea030f%2C34819%2C1731403935275.meta.1731403937282.meta 2024-11-12T09:32:17,305 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34915:34915),(127.0.0.1/127.0.0.1:37487:37487),(127.0.0.1/127.0.0.1:41707:41707)] 2024-11-12T09:32:17,305 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-12T09:32:17,307 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-12T09:32:17,309 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-12T09:32:17,313 INFO [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-12T09:32:17,317 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-12T09:32:17,318 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T09:32:17,318 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-12T09:32:17,318 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-12T09:32:17,321 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T09:32:17,323 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T09:32:17,323 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:17,324 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T09:32:17,324 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-12T09:32:17,325 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-12T09:32:17,325 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:17,326 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T09:32:17,326 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T09:32:17,328 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T09:32:17,328 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:17,329 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T09:32:17,329 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T09:32:17,330 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-12T09:32:17,330 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:17,331 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T09:32:17,331 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T09:32:17,332 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/hbase/meta/1588230740 2024-11-12T09:32:17,335 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/hbase/meta/1588230740 2024-11-12T09:32:17,338 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T09:32:17,338 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T09:32:17,339 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
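Each store opened for hbase:meta logs a CompactionConfiguration with minFilesToCompact:3, maxFilesToCompact:10 and a ratio of 1.2 (5.0 off-peak). Those numbers drive size-ratio minor compaction selection: a store file stays in the candidate set only if it is not much larger than the files it would be compacted with, so one oversized file is not rewritten on every compaction. The sketch below is a deliberately simplified version of that ratio test, not the actual ExploringCompactionPolicy code; only the thresholds are taken from the logged values.

```java
import java.util.ArrayList;
import java.util.List;

// Simplified illustration of the size-ratio test behind minor compaction
// selection; thresholds mirror the logged CompactionConfiguration values.
public class RatioSelectionSketch {
    static final double RATIO = 1.2;
    static final int MIN_FILES = 3;

    // Keep a file only if it is at most RATIO times the combined size of the
    // other candidates; require at least MIN_FILES survivors to compact at all.
    static List<Long> select(List<Long> sizes) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        List<Long> picked = new ArrayList<>();
        for (long size : sizes) {
            if (size <= RATIO * (total - size)) {
                picked.add(size);
            }
        }
        return picked.size() >= MIN_FILES ? picked : List.of();
    }

    public static void main(String[] args) {
        // The 100 MB file is excluded: it dwarfs the rest, so repeatedly
        // rewriting it in minor compactions would be wasted I/O.
        System.out.println(select(List.of(100_000_000L, 4_000_000L, 3_000_000L, 2_000_000L)));
    }
}
```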
2024-11-12T09:32:17,342 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T09:32:17,343 INFO [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69050419, jitterRate=0.028931424021720886}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T09:32:17,344 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-12T09:32:17,345 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731403937318Writing region info on filesystem at 1731403937318Initializing all the Stores at 1731403937320 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731403937321 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731403937321Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731403937321Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731403937321Cleaning up temporary data from old regions at 1731403937338 (+17 ms)Running coprocessor post-open hooks at 1731403937344 (+6 ms)Region opened successfully at 1731403937345 (+1 ms) 2024-11-12T09:32:17,354 INFO [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731403937252 2024-11-12T09:32:17,366 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-12T09:32:17,366 INFO [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-12T09:32:17,368 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=106923ea030f,34819,1731403935275 2024-11-12T09:32:17,371 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 106923ea030f,34819,1731403935275, state=OPEN 2024-11-12T09:32:17,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x1012e6363050001, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T09:32:17,382 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1012e6363050002, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T09:32:17,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T09:32:17,382 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34819-0x1012e6363050003, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T09:32:17,383 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T09:32:17,383 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T09:32:17,383 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T09:32:17,383 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T09:32:17,383 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=106923ea030f,34819,1731403935275 2024-11-12T09:32:17,389 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-12T09:32:17,389 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=106923ea030f,34819,1731403935275 in 307 msec 2024-11-12T09:32:17,396 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-12T09:32:17,397 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 839 msec 2024-11-12T09:32:17,398 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T09:32:17,398 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-12T09:32:17,416 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T09:32:17,417 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=106923ea030f,34819,1731403935275, seqNum=-1] 2024-11-12T09:32:17,435 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T09:32:17,438 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32899, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T09:32:17,458 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1350 sec 2024-11-12T09:32:17,458 INFO [master/106923ea030f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731403937458, completionTime=-1 2024-11-12T09:32:17,460 INFO [master/106923ea030f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-12T09:32:17,461 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-12T09:32:17,487 INFO [master/106923ea030f:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-12T09:32:17,487 INFO [master/106923ea030f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731403997487 2024-11-12T09:32:17,487 INFO [master/106923ea030f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731404057487 2024-11-12T09:32:17,487 INFO [master/106923ea030f:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 26 msec 2024-11-12T09:32:17,489 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-12T09:32:17,496 INFO [master/106923ea030f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=106923ea030f,33601,1731403934303-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:17,496 INFO [master/106923ea030f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=106923ea030f,33601,1731403934303-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:17,496 INFO [master/106923ea030f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=106923ea030f,33601,1731403934303-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:17,498 INFO [master/106923ea030f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-106923ea030f:33601, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:17,499 INFO [master/106923ea030f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:17,499 INFO [master/106923ea030f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
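The region-open entry further up also records the split policy for hbase:meta: SteppingSplitPolicy over IncreasingToUpperBoundRegionSplitPolicy with initialSize=268435456, and a ConstantSizeRegionSplitPolicy carrying desiredMaxFileSize=69050419 and jitterRate=0.028931424021720886. The logged threshold is consistent with a 64 MB base size (67,108,864 bytes, inferred from the arithmetic rather than stated in the log) inflated by the jitter rate; the snippet below simply reproduces that calculation as a worked check.

```java
// Worked check of the logged split-policy numbers: a 64 MB base times
// (1 + jitterRate) reproduces the logged desiredMaxFileSize of 69,050,419.
public class SplitJitterCheck {
    public static void main(String[] args) {
        long base = 64L * 1024 * 1024;            // 67,108,864 bytes (assumed base)
        double jitterRate = 0.028931424021720886; // value from the log line above
        long desiredMaxFileSize = (long) (base * (1 + jitterRate));
        System.out.println(desiredMaxFileSize);   // ~69,050,419
    }
}
```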
2024-11-12T09:32:17,505 DEBUG [master/106923ea030f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-12T09:32:17,551 INFO [master/106923ea030f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.164sec 2024-11-12T09:32:17,552 INFO [master/106923ea030f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-12T09:32:17,553 INFO [master/106923ea030f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-12T09:32:17,554 INFO [master/106923ea030f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-12T09:32:17,554 INFO [master/106923ea030f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-12T09:32:17,555 INFO [master/106923ea030f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-12T09:32:17,555 INFO [master/106923ea030f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=106923ea030f,33601,1731403934303-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T09:32:17,556 INFO [master/106923ea030f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=106923ea030f,33601,1731403934303-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-12T09:32:17,560 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-12T09:32:17,561 INFO [master/106923ea030f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-12T09:32:17,561 INFO [master/106923ea030f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=106923ea030f,33601,1731403934303-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-12T09:32:17,625 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f14512f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T09:32:17,628 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-12T09:32:17,628 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-12T09:32:17,632 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 106923ea030f,33601,-1 for getting cluster id 2024-11-12T09:32:17,634 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-12T09:32:17,641 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '17386135-fc15-4e2f-9d55-bb3637c4a8e2' 2024-11-12T09:32:17,643 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-12T09:32:17,643 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "17386135-fc15-4e2f-9d55-bb3637c4a8e2" 2024-11-12T09:32:17,643 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e0ad479, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T09:32:17,644 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [106923ea030f,33601,-1] 2024-11-12T09:32:17,646 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-12T09:32:17,647 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T09:32:17,649 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46980, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-12T09:32:17,652 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16c8a73d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T09:32:17,652 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T09:32:17,659 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=106923ea030f,34819,1731403935275, seqNum=-1] 2024-11-12T09:32:17,660 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T09:32:17,662 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38768, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T09:32:17,686 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=106923ea030f,33601,1731403934303 2024-11-12T09:32:17,691 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-12T09:32:17,696 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 106923ea030f,33601,1731403934303 2024-11-12T09:32:17,698 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@517aaee6 2024-11-12T09:32:17,699 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-12T09:32:17,701 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46984, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-12T09:32:17,707 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33601 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T09:32:17,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33601 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-12T09:32:17,718 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-12T09:32:17,720 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33601 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-12T09:32:17,721 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:17,724 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-12T09:32:17,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33601 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T09:32:17,732 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T09:32:17,732 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
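At this point the cluster is up and the test client issues the logged request: create 'TestHBaseWalOnEC' with REGION_REPLICATION => '1' and a single family 'cf' with VERSIONS => '1', which the master turns into CreateTableProcedure pid=4. Issued from client code, that request looks roughly like the Admin API sketch below; the table and family names follow the log, but this is a generic client-side sketch, not the test's actual source.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Client-side sketch of the logged request:
//   create 'TestHBaseWalOnEC', {NAME => 'cf', VERSIONS => '1'}
public class CreateTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.createTable(TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
                .setRegionReplication(1)
                .setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("cf"))
                    .setMaxVersions(1)
                    .build())
                .build());
        }
    }
}
```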
2024-11-12T09:32:17,735 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2116788459_22 at /127.0.0.1:32776 [Receiving block BP-820054393-172.17.0.2-1731403929408:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:35337:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32776 dst: /127.0.0.1:35337
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T09:32:17,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35337 is added to blk_-9223372036854775680_1021 (size=392)
2024-11-12T09:32:17,743 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-12T09:32:17,746 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4f8777bef3dbd1db49037e5e52fc68c3, NAME => 'TestHBaseWalOnEC,,1731403937703.4f8777bef3dbd1db49037e5e52fc68c3.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68
2024-11-12T09:32:17,752 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-12T09:32:17,752 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
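The DFSStripedOutputStream warnings and the DataXceiver error above are the expected symptom of running the RS-3-2-1024k policy on this minicluster: the policy stripes each block group into 3 data plus 2 parity blocks, so a full group needs 5 distinct datanodes, while the log shows only 3 (127.0.0.1:35337, :37913 and :36449). The parity blocks at indexes 3 and 4 therefore cannot be placed, the writer warns that the block group "failed to write 2 blocks", and it points at 'hdfs ec -verifyClusterSetup'. The snippet below sketches that capacity check with the HDFS client API, assuming the standard DistributedFileSystem/ErasureCodingPolicy accessors; the namenode address and path are taken from the log, and the test data directory is assumed to carry the EC policy.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

// Sketch: compare the EC policy's stripe width against the number of live
// datanodes, which is the mismatch behind the logged parity warnings.
public class EcCapacityCheck {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:37231"); // namenode from the log
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

        Path testData = new Path("/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68");
        ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(testData);
        int needed = policy.getNumDataUnits() + policy.getNumParityUnits(); // 3 + 2 for RS-3-2-1024k
        int live = dfs.getDataNodeStats().length;                           // 3 in this minicluster

        System.out.printf("policy %s needs %d datanodes, cluster has %d%n",
            policy.getName(), needed, live);
    }
}
```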
2024-11-12T09:32:17,755 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2116788459_22 at /127.0.0.1:34684 [Receiving block BP-820054393-172.17.0.2-1731403929408:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:37913:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34684 dst: /127.0.0.1:37913
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T09:32:17,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37913 is added to blk_-9223372036854775664_1023 (size=51)
2024-11-12T09:32:17,763 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-12T09:32:17,763 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731403937703.4f8777bef3dbd1db49037e5e52fc68c3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-12T09:32:17,764 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 4f8777bef3dbd1db49037e5e52fc68c3, disabling compactions & flushes
2024-11-12T09:32:17,764 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731403937703.4f8777bef3dbd1db49037e5e52fc68c3.
2024-11-12T09:32:17,764 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731403937703.4f8777bef3dbd1db49037e5e52fc68c3.
2024-11-12T09:32:17,764 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731403937703.4f8777bef3dbd1db49037e5e52fc68c3. after waiting 0 ms
2024-11-12T09:32:17,764 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731403937703.4f8777bef3dbd1db49037e5e52fc68c3.
2024-11-12T09:32:17,764 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731403937703.4f8777bef3dbd1db49037e5e52fc68c3.
2024-11-12T09:32:17,764 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4f8777bef3dbd1db49037e5e52fc68c3: Waiting for close lock at 1731403937764Disabling compacts and flushes for region at 1731403937764Disabling writes for close at 1731403937764Writing region close event to WAL at 1731403937764Closed at 1731403937764 2024-11-12T09:32:17,766 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-12T09:32:17,771 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731403937703.4f8777bef3dbd1db49037e5e52fc68c3.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731403937767"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731403937767"}]},"ts":"1731403937767"} 2024-11-12T09:32:17,776 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-12T09:32:17,778 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-12T09:32:17,781 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731403937778"}]},"ts":"1731403937778"} 2024-11-12T09:32:17,785 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-12T09:32:17,786 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {106923ea030f=0} racks are {/default-rack=0} 2024-11-12T09:32:17,787 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-12T09:32:17,787 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-12T09:32:17,787 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-12T09:32:17,787 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-12T09:32:17,787 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-12T09:32:17,787 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-12T09:32:17,788 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-12T09:32:17,788 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-12T09:32:17,788 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-12T09:32:17,788 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-12T09:32:17,789 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4f8777bef3dbd1db49037e5e52fc68c3, ASSIGN}] 2024-11-12T09:32:17,792 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4f8777bef3dbd1db49037e5e52fc68c3, ASSIGN 2024-11-12T09:32:17,794 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4f8777bef3dbd1db49037e5e52fc68c3, ASSIGN; state=OFFLINE, location=106923ea030f,37187,1731403935216; forceNewPlan=false, retain=false 2024-11-12T09:32:17,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33601 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T09:32:17,950 INFO [106923ea030f:33601 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-12T09:32:17,952 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4f8777bef3dbd1db49037e5e52fc68c3, regionState=OPENING, regionLocation=106923ea030f,37187,1731403935216 2024-11-12T09:32:17,958 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4f8777bef3dbd1db49037e5e52fc68c3, ASSIGN because future has completed 2024-11-12T09:32:17,959 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4f8777bef3dbd1db49037e5e52fc68c3, server=106923ea030f,37187,1731403935216}] 2024-11-12T09:32:18,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33601 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T09:32:18,114 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-12T09:32:18,118 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41703, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-12T09:32:18,127 INFO [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731403937703.4f8777bef3dbd1db49037e5e52fc68c3. 
2024-11-12T09:32:18,127 DEBUG [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 4f8777bef3dbd1db49037e5e52fc68c3, NAME => 'TestHBaseWalOnEC,,1731403937703.4f8777bef3dbd1db49037e5e52fc68c3.', STARTKEY => '', ENDKEY => ''} 2024-11-12T09:32:18,128 DEBUG [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 4f8777bef3dbd1db49037e5e52fc68c3 2024-11-12T09:32:18,128 DEBUG [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731403937703.4f8777bef3dbd1db49037e5e52fc68c3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T09:32:18,128 DEBUG [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 4f8777bef3dbd1db49037e5e52fc68c3 2024-11-12T09:32:18,128 DEBUG [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 4f8777bef3dbd1db49037e5e52fc68c3 2024-11-12T09:32:18,130 INFO [StoreOpener-4f8777bef3dbd1db49037e5e52fc68c3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4f8777bef3dbd1db49037e5e52fc68c3 2024-11-12T09:32:18,133 INFO [StoreOpener-4f8777bef3dbd1db49037e5e52fc68c3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4f8777bef3dbd1db49037e5e52fc68c3 columnFamilyName cf 2024-11-12T09:32:18,133 DEBUG [StoreOpener-4f8777bef3dbd1db49037e5e52fc68c3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:18,134 INFO [StoreOpener-4f8777bef3dbd1db49037e5e52fc68c3-1 {}] regionserver.HStore(327): Store=4f8777bef3dbd1db49037e5e52fc68c3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T09:32:18,134 DEBUG [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 4f8777bef3dbd1db49037e5e52fc68c3 2024-11-12T09:32:18,136 DEBUG [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/default/TestHBaseWalOnEC/4f8777bef3dbd1db49037e5e52fc68c3 2024-11-12T09:32:18,137 DEBUG 
[RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/default/TestHBaseWalOnEC/4f8777bef3dbd1db49037e5e52fc68c3 2024-11-12T09:32:18,137 DEBUG [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 4f8777bef3dbd1db49037e5e52fc68c3 2024-11-12T09:32:18,137 DEBUG [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 4f8777bef3dbd1db49037e5e52fc68c3 2024-11-12T09:32:18,141 DEBUG [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 4f8777bef3dbd1db49037e5e52fc68c3 2024-11-12T09:32:18,146 DEBUG [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/default/TestHBaseWalOnEC/4f8777bef3dbd1db49037e5e52fc68c3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T09:32:18,147 INFO [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 4f8777bef3dbd1db49037e5e52fc68c3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73890103, jitterRate=0.10104833543300629}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-12T09:32:18,148 DEBUG [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4f8777bef3dbd1db49037e5e52fc68c3 2024-11-12T09:32:18,149 DEBUG [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 4f8777bef3dbd1db49037e5e52fc68c3: Running coprocessor pre-open hook at 1731403938128Writing region info on filesystem at 1731403938128Initializing all the Stores at 1731403938130 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731403938130Cleaning up temporary data from old regions at 1731403938138 (+8 ms)Running coprocessor post-open hooks at 1731403938148 (+10 ms)Region opened successfully at 1731403938149 (+1 ms) 2024-11-12T09:32:18,152 INFO [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731403937703.4f8777bef3dbd1db49037e5e52fc68c3., pid=6, masterSystemTime=1731403938113 2024-11-12T09:32:18,156 DEBUG [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731403937703.4f8777bef3dbd1db49037e5e52fc68c3. 2024-11-12T09:32:18,156 INFO [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731403937703.4f8777bef3dbd1db49037e5e52fc68c3. 
2024-11-12T09:32:18,157 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4f8777bef3dbd1db49037e5e52fc68c3, regionState=OPEN, openSeqNum=2, regionLocation=106923ea030f,37187,1731403935216 2024-11-12T09:32:18,162 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4f8777bef3dbd1db49037e5e52fc68c3, server=106923ea030f,37187,1731403935216 because future has completed 2024-11-12T09:32:18,169 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-12T09:32:18,170 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 4f8777bef3dbd1db49037e5e52fc68c3, server=106923ea030f,37187,1731403935216 in 205 msec 2024-11-12T09:32:18,173 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-12T09:32:18,173 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4f8777bef3dbd1db49037e5e52fc68c3, ASSIGN in 380 msec 2024-11-12T09:32:18,174 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-12T09:32:18,175 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731403938174"}]},"ts":"1731403938174"} 2024-11-12T09:32:18,177 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-12T09:32:18,179 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-12T09:32:18,183 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 469 msec 2024-11-12T09:32:18,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33601 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T09:32:18,358 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-12T09:32:18,358 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-12T09:32:18,360 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-12T09:32:18,368 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-12T09:32:18,368 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-12T09:32:18,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
2024-11-12T09:32:18,376 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731403937703.4f8777bef3dbd1db49037e5e52fc68c3., hostname=106923ea030f,37187,1731403935216, seqNum=2] 2024-11-12T09:32:18,377 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T09:32:18,379 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54464, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T09:32:18,388 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33601 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-11-12T09:32:18,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33601 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-12T09:32:18,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33601 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T09:32:18,396 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-12T09:32:18,398 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T09:32:18,399 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T09:32:18,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33601 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T09:32:18,561 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37187 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-12T09:32:18,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731403937703.4f8777bef3dbd1db49037e5e52fc68c3. 
2024-11-12T09:32:18,569 INFO [RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 4f8777bef3dbd1db49037e5e52fc68c3 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-12T09:32:18,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/default/TestHBaseWalOnEC/4f8777bef3dbd1db49037e5e52fc68c3/.tmp/cf/971756e219e24688bed5b68614de7458 is 36, key is row/cf:cq/1731403938380/Put/seqid=0 2024-11-12T09:32:18,625 WARN [RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T09:32:18,625 WARN [RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T09:32:18,629 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-24998604_22 at /127.0.0.1:34696 [Receiving block BP-820054393-172.17.0.2-1731403929408:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:37913:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34696 dst: /127.0.0.1:37913 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T09:32:18,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37913 is added to blk_-9223372036854775648_1025 (size=4787) 2024-11-12T09:32:18,634 WARN [RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-12T09:32:18,635 INFO [RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/default/TestHBaseWalOnEC/4f8777bef3dbd1db49037e5e52fc68c3/.tmp/cf/971756e219e24688bed5b68614de7458 2024-11-12T09:32:18,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/default/TestHBaseWalOnEC/4f8777bef3dbd1db49037e5e52fc68c3/.tmp/cf/971756e219e24688bed5b68614de7458 as hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/default/TestHBaseWalOnEC/4f8777bef3dbd1db49037e5e52fc68c3/cf/971756e219e24688bed5b68614de7458 2024-11-12T09:32:18,690 INFO [RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/default/TestHBaseWalOnEC/4f8777bef3dbd1db49037e5e52fc68c3/cf/971756e219e24688bed5b68614de7458, entries=1, sequenceid=5, filesize=4.7 K 2024-11-12T09:32:18,699 INFO [RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 4f8777bef3dbd1db49037e5e52fc68c3 in 129ms, sequenceid=5, compaction requested=false 2024-11-12T09:32:18,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-11-12T09:32:18,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 4f8777bef3dbd1db49037e5e52fc68c3: 2024-11-12T09:32:18,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731403937703.4f8777bef3dbd1db49037e5e52fc68c3. 
2024-11-12T09:32:18,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-12T09:32:18,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33601 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-12T09:32:18,714 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-12T09:32:18,714 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 311 msec 2024-11-12T09:32:18,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33601 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T09:32:18,717 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 325 msec 2024-11-12T09:32:19,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33601 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T09:32:19,027 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-12T09:32:19,046 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-12T09:32:19,046 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-12T09:32:19,046 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at 
org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T09:32:19,050 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T09:32:19,050 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T09:32:19,050 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-12T09:32:19,051 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-12T09:32:19,051 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1904156029, stopped=false 2024-11-12T09:32:19,051 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=106923ea030f,33601,1731403934303 2024-11-12T09:32:19,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T09:32:19,102 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34819-0x1012e6363050003, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T09:32:19,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x1012e6363050001, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T09:32:19,102 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1012e6363050002, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T09:32:19,102 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34819-0x1012e6363050003, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:19,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x1012e6363050001, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:19,102 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1012e6363050002, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:19,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:19,103 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T09:32:19,104 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-12T09:32:19,104 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34819-0x1012e6363050003, quorum=127.0.0.1:63479, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T09:32:19,104 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39993-0x1012e6363050001, quorum=127.0.0.1:63479, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T09:32:19,104 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T09:32:19,104 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37187-0x1012e6363050002, quorum=127.0.0.1:63479, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T09:32:19,105 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T09:32:19,105 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T09:32:19,106 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '106923ea030f,39993,1731403935112' ***** 2024-11-12T09:32:19,107 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T09:32:19,107 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '106923ea030f,37187,1731403935216' ***** 2024-11-12T09:32:19,107 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T09:32:19,107 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '106923ea030f,34819,1731403935275' ***** 2024-11-12T09:32:19,107 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T09:32:19,107 INFO [RS:0;106923ea030f:39993 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T09:32:19,107 INFO [RS:1;106923ea030f:37187 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T09:32:19,107 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T09:32:19,107 INFO [RS:1;106923ea030f:37187 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T09:32:19,107 INFO [RS:0;106923ea030f:39993 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T09:32:19,108 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T09:32:19,108 INFO [RS:2;106923ea030f:34819 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T09:32:19,108 INFO [RS:0;106923ea030f:39993 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-12T09:32:19,108 INFO [RS:1;106923ea030f:37187 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-12T09:32:19,108 INFO [RS:2;106923ea030f:34819 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-11-12T09:32:19,108 INFO [RS:0;106923ea030f:39993 {}] regionserver.HRegionServer(959): stopping server 106923ea030f,39993,1731403935112 2024-11-12T09:32:19,108 INFO [RS:2;106923ea030f:34819 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-12T09:32:19,108 INFO [RS:0;106923ea030f:39993 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T09:32:19,108 INFO [RS:2;106923ea030f:34819 {}] regionserver.HRegionServer(959): stopping server 106923ea030f,34819,1731403935275 2024-11-12T09:32:19,108 INFO [RS:1;106923ea030f:37187 {}] regionserver.HRegionServer(3091): Received CLOSE for 4f8777bef3dbd1db49037e5e52fc68c3 2024-11-12T09:32:19,108 INFO [RS:2;106923ea030f:34819 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T09:32:19,108 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T09:32:19,108 INFO [RS:0;106923ea030f:39993 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;106923ea030f:39993. 2024-11-12T09:32:19,108 INFO [RS:2;106923ea030f:34819 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;106923ea030f:34819. 2024-11-12T09:32:19,109 DEBUG [RS:0;106923ea030f:39993 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T09:32:19,109 DEBUG [RS:2;106923ea030f:34819 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at 
org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T09:32:19,109 DEBUG [RS:0;106923ea030f:39993 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T09:32:19,109 DEBUG [RS:2;106923ea030f:34819 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T09:32:19,109 INFO [RS:1;106923ea030f:37187 {}] regionserver.HRegionServer(959): stopping server 106923ea030f,37187,1731403935216 2024-11-12T09:32:19,109 INFO [RS:1;106923ea030f:37187 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T09:32:19,109 INFO [RS:0;106923ea030f:39993 {}] regionserver.HRegionServer(976): stopping server 106923ea030f,39993,1731403935112; all regions closed. 2024-11-12T09:32:19,109 INFO [RS:2;106923ea030f:34819 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T09:32:19,109 INFO [RS:2;106923ea030f:34819 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T09:32:19,109 INFO [RS:1;106923ea030f:37187 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;106923ea030f:37187. 2024-11-12T09:32:19,109 INFO [RS:2;106923ea030f:34819 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-12T09:32:19,109 DEBUG [RS:1;106923ea030f:37187 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T09:32:19,109 DEBUG [RS:1;106923ea030f:37187 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T09:32:19,109 INFO [RS:2;106923ea030f:34819 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-12T09:32:19,109 INFO [RS:1;106923ea030f:37187 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-12T09:32:19,109 DEBUG [RS_CLOSE_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4f8777bef3dbd1db49037e5e52fc68c3, disabling compactions & flushes 2024-11-12T09:32:19,109 DEBUG [RS:1;106923ea030f:37187 {}] regionserver.HRegionServer(1325): Online Regions={4f8777bef3dbd1db49037e5e52fc68c3=TestHBaseWalOnEC,,1731403937703.4f8777bef3dbd1db49037e5e52fc68c3.} 2024-11-12T09:32:19,109 INFO [RS_CLOSE_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731403937703.4f8777bef3dbd1db49037e5e52fc68c3. 2024-11-12T09:32:19,110 DEBUG [RS_CLOSE_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731403937703.4f8777bef3dbd1db49037e5e52fc68c3. 2024-11-12T09:32:19,110 DEBUG [RS_CLOSE_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731403937703.4f8777bef3dbd1db49037e5e52fc68c3. after waiting 0 ms 2024-11-12T09:32:19,110 INFO [RS:2;106923ea030f:34819 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-12T09:32:19,110 DEBUG [RS:2;106923ea030f:34819 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-12T09:32:19,110 DEBUG [RS_CLOSE_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731403937703.4f8777bef3dbd1db49037e5e52fc68c3. 
2024-11-12T09:32:19,110 DEBUG [RS:1;106923ea030f:37187 {}] regionserver.HRegionServer(1351): Waiting on 4f8777bef3dbd1db49037e5e52fc68c3 2024-11-12T09:32:19,110 DEBUG [RS:2;106923ea030f:34819 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-12T09:32:19,110 DEBUG [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-12T09:32:19,110 INFO [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-12T09:32:19,110 DEBUG [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-12T09:32:19,110 DEBUG [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T09:32:19,110 DEBUG [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T09:32:19,111 INFO [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-12T09:32:19,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37913 is added to blk_1073741826_1016 (size=93) 2024-11-12T09:32:19,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36449 is added to blk_1073741826_1016 (size=93) 2024-11-12T09:32:19,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35337 is added to blk_1073741826_1016 (size=93) 2024-11-12T09:32:19,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36449 is added to blk_-9223372036854775772_1004 (size=42) 2024-11-12T09:32:19,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35337 is added to blk_-9223372036854775773_1004 (size=42) 2024-11-12T09:32:19,121 INFO [regionserver/106923ea030f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T09:32:19,121 INFO [regionserver/106923ea030f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T09:32:19,121 INFO [regionserver/106923ea030f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T09:32:19,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35337 is added to blk_-9223372036854775757_1006 (size=196) 2024-11-12T09:32:19,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36449 is added to blk_-9223372036854775756_1006 (size=196) 2024-11-12T09:32:19,125 DEBUG [RS:0;106923ea030f:39993 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/oldWALs 2024-11-12T09:32:19,125 INFO [RS:0;106923ea030f:39993 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 106923ea030f%2C39993%2C1731403935112:(num 1731403936775) 2024-11-12T09:32:19,125 DEBUG [RS:0;106923ea030f:39993 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T09:32:19,125 INFO [RS:0;106923ea030f:39993 {}] regionserver.LeaseManager(133): 
Closed leases 2024-11-12T09:32:19,125 INFO [RS:0;106923ea030f:39993 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T09:32:19,126 INFO [RS:0;106923ea030f:39993 {}] hbase.ChoreService(370): Chore service for: regionserver/106923ea030f:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-12T09:32:19,126 INFO [RS:0;106923ea030f:39993 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T09:32:19,126 INFO [regionserver/106923ea030f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-12T09:32:19,126 INFO [RS:0;106923ea030f:39993 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T09:32:19,126 INFO [RS:0;106923ea030f:39993 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-12T09:32:19,126 INFO [RS:0;106923ea030f:39993 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T09:32:19,127 INFO [RS:0;106923ea030f:39993 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39993 2024-11-12T09:32:19,132 DEBUG [RS_CLOSE_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/default/TestHBaseWalOnEC/4f8777bef3dbd1db49037e5e52fc68c3/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-12T09:32:19,135 INFO [RS_CLOSE_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731403937703.4f8777bef3dbd1db49037e5e52fc68c3. 2024-11-12T09:32:19,135 DEBUG [RS_CLOSE_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4f8777bef3dbd1db49037e5e52fc68c3: Waiting for close lock at 1731403939109Running coprocessor pre-close hooks at 1731403939109Disabling compacts and flushes for region at 1731403939109Disabling writes for close at 1731403939110 (+1 ms)Writing region close event to WAL at 1731403939111 (+1 ms)Running coprocessor post-close hooks at 1731403939134 (+23 ms)Closed at 1731403939135 (+1 ms) 2024-11-12T09:32:19,135 DEBUG [RS_CLOSE_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731403937703.4f8777bef3dbd1db49037e5e52fc68c3. 
2024-11-12T09:32:19,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x1012e6363050001, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/106923ea030f,39993,1731403935112 2024-11-12T09:32:19,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T09:32:19,140 INFO [RS:0;106923ea030f:39993 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T09:32:19,147 DEBUG [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/hbase/meta/1588230740/.tmp/info/b7604a9d0ff64978b700bc532891d156 is 153, key is TestHBaseWalOnEC,,1731403937703.4f8777bef3dbd1db49037e5e52fc68c3./info:regioninfo/1731403938157/Put/seqid=0 2024-11-12T09:32:19,149 WARN [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T09:32:19,150 WARN [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T09:32:19,151 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [106923ea030f,39993,1731403935112] 2024-11-12T09:32:19,154 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-810016785_22 at /127.0.0.1:32812 [Receiving block BP-820054393-172.17.0.2-1731403929408:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:35337:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32812 dst: /127.0.0.1:35337 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T09:32:19,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35337 is added to blk_-9223372036854775632_1027 (size=6637) 2024-11-12T09:32:19,159 WARN [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T09:32:19,159 INFO [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/hbase/meta/1588230740/.tmp/info/b7604a9d0ff64978b700bc532891d156 2024-11-12T09:32:19,161 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/106923ea030f,39993,1731403935112 already deleted, retry=false 2024-11-12T09:32:19,161 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 106923ea030f,39993,1731403935112 expired; onlineServers=2 2024-11-12T09:32:19,186 DEBUG [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/hbase/meta/1588230740/.tmp/ns/24cfd37984334b35abdcd16b13658422 is 43, key is default/ns:d/1731403937442/Put/seqid=0 2024-11-12T09:32:19,188 WARN [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T09:32:19,188 WARN [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T09:32:19,192 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-810016785_22 at /127.0.0.1:49840 [Receiving block BP-820054393-172.17.0.2-1731403929408:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:36449:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49840 dst: /127.0.0.1:36449 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T09:32:19,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36449 is added to blk_-9223372036854775616_1029 (size=5153) 2024-11-12T09:32:19,197 WARN [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T09:32:19,197 INFO [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/hbase/meta/1588230740/.tmp/ns/24cfd37984334b35abdcd16b13658422 2024-11-12T09:32:19,226 DEBUG [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/hbase/meta/1588230740/.tmp/table/88e1787b019b405e87af5e2442a5b1de is 52, key is TestHBaseWalOnEC/table:state/1731403938174/Put/seqid=0 2024-11-12T09:32:19,228 WARN [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T09:32:19,228 WARN [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T09:32:19,231 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-810016785_22 at /127.0.0.1:34746 [Receiving block BP-820054393-172.17.0.2-1731403929408:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:37913:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34746 dst: /127.0.0.1:37913 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T09:32:19,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37913 is added to blk_-9223372036854775600_1031 (size=5249) 2024-11-12T09:32:19,236 WARN [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T09:32:19,236 INFO [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/hbase/meta/1588230740/.tmp/table/88e1787b019b405e87af5e2442a5b1de 2024-11-12T09:32:19,247 DEBUG [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/hbase/meta/1588230740/.tmp/info/b7604a9d0ff64978b700bc532891d156 as hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/hbase/meta/1588230740/info/b7604a9d0ff64978b700bc532891d156 2024-11-12T09:32:19,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x1012e6363050001, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T09:32:19,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x1012e6363050001, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T09:32:19,252 INFO [RS:0;106923ea030f:39993 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T09:32:19,252 INFO [RS:0;106923ea030f:39993 {}] regionserver.HRegionServer(1031): Exiting; stopping=106923ea030f,39993,1731403935112; zookeeper connection closed. 
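The recurring warnings above are about erasure coding capacity: policy RS-3-2-1024k stripes each block group across 3 data blocks and 2 parity blocks, so it needs at least 5 DataNodes, while this mini cluster runs only 3. DFSStripedOutputStream therefore cannot place parity blocks 3 and 4, reports "Block group <1> failed to write 2 blocks", and the DataXceiver "Premature EOF from inputStream" errors show up on the DataNodes as the short striped writes are torn down. The log itself points at `hdfs ec -verifyClusterSetup`. The sketch below is a hypothetical stand-alone Java equivalent of that check, not part of the test code; the fs.defaultFS address and the path are illustrative values copied from this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcCapacityCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // NameNode address taken from this log; adjust for a real cluster.
    conf.set("fs.defaultFS", "hdfs://localhost:37231");
    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // Directory assumed (for illustration) to carry an erasure coding policy.
      Path dir = new Path("/user/jenkins/test-data");
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
      if (policy == null) {
        System.out.println(dir + " is replicated, not erasure coded");
        return;
      }
      // RS-3-2-1024k => 3 data units + 2 parity units = 5 DataNodes required.
      int required = policy.getNumDataUnits() + policy.getNumParityUnits();
      int live = dfs.getDataNodeStats().length;
      System.out.printf("policy=%s requires %d DataNodes, cluster has %d%n",
          policy.getName(), required, live);
      if (live < required) {
        System.out.println("Not all parity blocks can be placed; expect warnings like the ones in this log.");
      }
    }
  }
}
```

Against the 3-DataNode mini cluster in this log, such a check would report 5 required versus 3 live, which matches the two unallocatable parity blocks per block group seen above.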
2024-11-12T09:32:19,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35337 is added to blk_-9223372036854775740_1008 (size=1189) 2024-11-12T09:32:19,253 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7fb343d9 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7fb343d9 2024-11-12T09:32:19,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37913 is added to blk_-9223372036854775725_1010 (size=34) 2024-11-12T09:32:19,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37913 is added to blk_-9223372036854775741_1008 (size=1189) 2024-11-12T09:32:19,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36449 is added to blk_-9223372036854775724_1010 (size=34) 2024-11-12T09:32:19,260 INFO [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/hbase/meta/1588230740/info/b7604a9d0ff64978b700bc532891d156, entries=10, sequenceid=11, filesize=6.5 K 2024-11-12T09:32:19,262 DEBUG [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/hbase/meta/1588230740/.tmp/ns/24cfd37984334b35abdcd16b13658422 as hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/hbase/meta/1588230740/ns/24cfd37984334b35abdcd16b13658422 2024-11-12T09:32:19,272 INFO [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/hbase/meta/1588230740/ns/24cfd37984334b35abdcd16b13658422, entries=2, sequenceid=11, filesize=5.0 K 2024-11-12T09:32:19,274 DEBUG [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/hbase/meta/1588230740/.tmp/table/88e1787b019b405e87af5e2442a5b1de as hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/hbase/meta/1588230740/table/88e1787b019b405e87af5e2442a5b1de 2024-11-12T09:32:19,284 INFO [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/hbase/meta/1588230740/table/88e1787b019b405e87af5e2442a5b1de, entries=2, sequenceid=11, filesize=5.1 K 2024-11-12T09:32:19,285 INFO [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 175ms, sequenceid=11, compaction requested=false 2024-11-12T09:32:19,285 DEBUG [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-12T09:32:19,295 DEBUG [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-12T09:32:19,296 DEBUG [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-12T09:32:19,296 INFO [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-12T09:32:19,296 DEBUG [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731403939110Running coprocessor pre-close hooks at 1731403939110Disabling compacts and flushes for region at 1731403939110Disabling writes for close at 1731403939110Obtaining lock to block concurrent updates at 1731403939111 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731403939111Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731403939111Flushing stores of hbase:meta,,1.1588230740 at 1731403939113 (+2 ms)Flushing 1588230740/info: creating writer at 1731403939113Flushing 1588230740/info: appending metadata at 1731403939141 (+28 ms)Flushing 1588230740/info: closing flushed file at 1731403939141Flushing 1588230740/ns: creating writer at 1731403939170 (+29 ms)Flushing 1588230740/ns: appending metadata at 1731403939185 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731403939185Flushing 1588230740/table: creating writer at 1731403939208 (+23 ms)Flushing 1588230740/table: appending metadata at 1731403939224 (+16 ms)Flushing 1588230740/table: closing flushed file at 1731403939224Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@515ee98c: reopening flushed file at 1731403939246 (+22 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@216c0cbb: reopening flushed file at 1731403939260 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@46c8fada: reopening flushed file at 1731403939272 (+12 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 175ms, sequenceid=11, compaction requested=false at 1731403939285 (+13 ms)Writing region close event to WAL at 1731403939288 (+3 ms)Running coprocessor post-close hooks at 1731403939296 (+8 ms)Closed at 1731403939296 2024-11-12T09:32:19,296 DEBUG [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-12T09:32:19,310 INFO [RS:1;106923ea030f:37187 {}] regionserver.HRegionServer(976): stopping server 106923ea030f,37187,1731403935216; all regions closed. 2024-11-12T09:32:19,310 INFO [RS:2;106923ea030f:34819 {}] regionserver.HRegionServer(976): stopping server 106923ea030f,34819,1731403935275; all regions closed. 
2024-11-12T09:32:19,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36449 is added to blk_1073741829_1019 (size=2751) 2024-11-12T09:32:19,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35337 is added to blk_1073741829_1019 (size=2751) 2024-11-12T09:32:19,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35337 is added to blk_1073741827_1017 (size=1298) 2024-11-12T09:32:19,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37913 is added to blk_1073741827_1017 (size=1298) 2024-11-12T09:32:19,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37913 is added to blk_1073741829_1019 (size=2751) 2024-11-12T09:32:19,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36449 is added to blk_1073741827_1017 (size=1298) 2024-11-12T09:32:19,320 DEBUG [RS:2;106923ea030f:34819 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/oldWALs 2024-11-12T09:32:19,321 INFO [RS:2;106923ea030f:34819 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 106923ea030f%2C34819%2C1731403935275.meta:.meta(num 1731403937282) 2024-11-12T09:32:19,323 DEBUG [RS:1;106923ea030f:37187 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/oldWALs 2024-11-12T09:32:19,323 INFO [RS:1;106923ea030f:37187 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 106923ea030f%2C37187%2C1731403935216:(num 1731403936775) 2024-11-12T09:32:19,323 DEBUG [RS:1;106923ea030f:37187 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T09:32:19,323 INFO [RS:1;106923ea030f:37187 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T09:32:19,323 INFO [RS:1;106923ea030f:37187 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T09:32:19,323 INFO [RS:1;106923ea030f:37187 {}] hbase.ChoreService(370): Chore service for: regionserver/106923ea030f:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-12T09:32:19,323 INFO [RS:1;106923ea030f:37187 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T09:32:19,324 INFO [RS:1;106923ea030f:37187 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T09:32:19,324 INFO [RS:1;106923ea030f:37187 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-12T09:32:19,324 INFO [RS:1;106923ea030f:37187 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T09:32:19,324 INFO [RS:1;106923ea030f:37187 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37187 2024-11-12T09:32:19,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36449 is added to blk_1073741828_1018 (size=93) 2024-11-12T09:32:19,324 INFO [regionserver/106923ea030f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-12T09:32:19,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35337 is added to blk_1073741828_1018 (size=93) 2024-11-12T09:32:19,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37913 is added to blk_1073741828_1018 (size=93) 2024-11-12T09:32:19,329 DEBUG [RS:2;106923ea030f:34819 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/oldWALs 2024-11-12T09:32:19,329 INFO [RS:2;106923ea030f:34819 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 106923ea030f%2C34819%2C1731403935275:(num 1731403936776) 2024-11-12T09:32:19,329 DEBUG [RS:2;106923ea030f:34819 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T09:32:19,329 INFO [RS:2;106923ea030f:34819 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T09:32:19,329 INFO [RS:2;106923ea030f:34819 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T09:32:19,329 INFO [RS:2;106923ea030f:34819 {}] hbase.ChoreService(370): Chore service for: regionserver/106923ea030f:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-12T09:32:19,330 INFO [RS:2;106923ea030f:34819 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T09:32:19,330 INFO [regionserver/106923ea030f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-12T09:32:19,330 INFO [RS:2;106923ea030f:34819 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34819 2024-11-12T09:32:19,351 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1012e6363050002, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/106923ea030f,37187,1731403935216 2024-11-12T09:32:19,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T09:32:19,351 INFO [RS:1;106923ea030f:37187 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T09:32:19,361 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34819-0x1012e6363050003, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/106923ea030f,34819,1731403935275 2024-11-12T09:32:19,361 INFO [RS:2;106923ea030f:34819 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T09:32:19,372 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [106923ea030f,37187,1731403935216] 2024-11-12T09:32:19,392 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/106923ea030f,37187,1731403935216 already deleted, retry=false 2024-11-12T09:32:19,393 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 106923ea030f,37187,1731403935216 expired; onlineServers=1 2024-11-12T09:32:19,393 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [106923ea030f,34819,1731403935275] 2024-11-12T09:32:19,403 DEBUG [RegionServerTracker-0 {}] 
zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/106923ea030f,34819,1731403935275 already deleted, retry=false 2024-11-12T09:32:19,403 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 106923ea030f,34819,1731403935275 expired; onlineServers=0 2024-11-12T09:32:19,403 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '106923ea030f,33601,1731403934303' ***** 2024-11-12T09:32:19,403 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-12T09:32:19,404 INFO [M:0;106923ea030f:33601 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T09:32:19,404 INFO [M:0;106923ea030f:33601 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T09:32:19,404 DEBUG [M:0;106923ea030f:33601 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-12T09:32:19,404 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-12T09:32:19,404 DEBUG [M:0;106923ea030f:33601 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-12T09:32:19,404 DEBUG [master/106923ea030f:0:becomeActiveMaster-HFileCleaner.small.0-1731403936416 {}] cleaner.HFileCleaner(306): Exit Thread[master/106923ea030f:0:becomeActiveMaster-HFileCleaner.small.0-1731403936416,5,FailOnTimeoutGroup] 2024-11-12T09:32:19,404 DEBUG [master/106923ea030f:0:becomeActiveMaster-HFileCleaner.large.0-1731403936413 {}] cleaner.HFileCleaner(306): Exit Thread[master/106923ea030f:0:becomeActiveMaster-HFileCleaner.large.0-1731403936413,5,FailOnTimeoutGroup] 2024-11-12T09:32:19,405 INFO [M:0;106923ea030f:33601 {}] hbase.ChoreService(370): Chore service for: master/106923ea030f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-12T09:32:19,405 INFO [M:0;106923ea030f:33601 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T09:32:19,405 DEBUG [M:0;106923ea030f:33601 {}] master.HMaster(1795): Stopping service threads 2024-11-12T09:32:19,406 INFO [M:0;106923ea030f:33601 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-12T09:32:19,406 INFO [M:0;106923ea030f:33601 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T09:32:19,407 INFO [M:0;106923ea030f:33601 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-12T09:32:19,407 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-12T09:32:19,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-12T09:32:19,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:19,418 DEBUG [M:0;106923ea030f:33601 {}] zookeeper.ZKUtil(347): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-12T09:32:19,418 WARN [M:0;106923ea030f:33601 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-12T09:32:19,419 INFO [M:0;106923ea030f:33601 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/.lastflushedseqids 2024-11-12T09:32:19,433 WARN [M:0;106923ea030f:33601 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T09:32:19,433 WARN [M:0;106923ea030f:33601 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T09:32:19,435 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2116788459_22 at /127.0.0.1:32846 [Receiving block BP-820054393-172.17.0.2-1731403929408:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:35337:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32846 dst: /127.0.0.1:35337 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T09:32:19,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35337 is added to blk_-9223372036854775584_1033 (size=127) 2024-11-12T09:32:19,440 WARN [M:0;106923ea030f:33601 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T09:32:19,440 INFO [M:0;106923ea030f:33601 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-12T09:32:19,440 INFO [M:0;106923ea030f:33601 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-12T09:32:19,440 DEBUG [M:0;106923ea030f:33601 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T09:32:19,440 INFO [M:0;106923ea030f:33601 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T09:32:19,441 DEBUG [M:0;106923ea030f:33601 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T09:32:19,441 DEBUG [M:0;106923ea030f:33601 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-12T09:32:19,441 DEBUG [M:0;106923ea030f:33601 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T09:32:19,441 INFO [M:0;106923ea030f:33601 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-11-12T09:32:19,463 DEBUG [M:0;106923ea030f:33601 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/61457eb5a53d4521b2da04cba648ca94 is 82, key is hbase:meta,,1/info:regioninfo/1731403937367/Put/seqid=0 2024-11-12T09:32:19,465 WARN [M:0;106923ea030f:33601 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T09:32:19,465 WARN [M:0;106923ea030f:33601 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T09:32:19,468 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2116788459_22 at /127.0.0.1:34774 [Receiving block BP-820054393-172.17.0.2-1731403929408:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:37913:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34774 dst: /127.0.0.1:37913 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T09:32:19,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37913 is added to blk_-9223372036854775568_1035 (size=5672) 2024-11-12T09:32:19,472 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1012e6363050002, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T09:32:19,472 INFO [RS:1;106923ea030f:37187 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T09:32:19,472 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1012e6363050002, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T09:32:19,472 INFO [RS:1;106923ea030f:37187 {}] regionserver.HRegionServer(1031): Exiting; stopping=106923ea030f,37187,1731403935216; zookeeper connection closed. 2024-11-12T09:32:19,472 WARN [M:0;106923ea030f:33601 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T09:32:19,472 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@905608b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@905608b 2024-11-12T09:32:19,472 INFO [M:0;106923ea030f:33601 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/61457eb5a53d4521b2da04cba648ca94 2024-11-12T09:32:19,482 INFO [RS:2;106923ea030f:34819 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T09:32:19,482 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34819-0x1012e6363050003, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T09:32:19,482 INFO [RS:2;106923ea030f:34819 {}] regionserver.HRegionServer(1031): Exiting; stopping=106923ea030f,34819,1731403935275; zookeeper connection closed. 
2024-11-12T09:32:19,482 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34819-0x1012e6363050003, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T09:32:19,483 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@63c4c0cd {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@63c4c0cd 2024-11-12T09:32:19,483 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-12T09:32:19,500 DEBUG [M:0;106923ea030f:33601 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/373f8ae507064ab18df7f5606edcbb11 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731403938182/Put/seqid=0 2024-11-12T09:32:19,503 WARN [M:0;106923ea030f:33601 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T09:32:19,503 WARN [M:0;106923ea030f:33601 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T09:32:19,506 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2116788459_22 at /127.0.0.1:34788 [Receiving block BP-820054393-172.17.0.2-1731403929408:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:37913:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34788 dst: /127.0.0.1:37913 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T09:32:19,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37913 is added to blk_-9223372036854775552_1037 (size=6440) 2024-11-12T09:32:19,513 WARN [M:0;106923ea030f:33601 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T09:32:19,513 INFO [M:0;106923ea030f:33601 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.15 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/373f8ae507064ab18df7f5606edcbb11 2024-11-12T09:32:19,537 DEBUG [M:0;106923ea030f:33601 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/590374e876344eb7b12d32ddd1f35025 is 69, key is 106923ea030f,34819,1731403935275/rs:state/1731403936451/Put/seqid=0 2024-11-12T09:32:19,539 WARN [M:0;106923ea030f:33601 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T09:32:19,539 WARN [M:0;106923ea030f:33601 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T09:32:19,541 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2116788459_22 at /127.0.0.1:49886 [Receiving block BP-820054393-172.17.0.2-1731403929408:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:36449:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49886 dst: /127.0.0.1:36449 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T09:32:19,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36449 is added to blk_-9223372036854775536_1039 (size=5294) 2024-11-12T09:32:19,545 WARN [M:0;106923ea030f:33601 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T09:32:19,546 INFO [M:0;106923ea030f:33601 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/590374e876344eb7b12d32ddd1f35025 2024-11-12T09:32:19,557 DEBUG [M:0;106923ea030f:33601 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/61457eb5a53d4521b2da04cba648ca94 as hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/61457eb5a53d4521b2da04cba648ca94 2024-11-12T09:32:19,567 INFO [M:0;106923ea030f:33601 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/61457eb5a53d4521b2da04cba648ca94, entries=8, sequenceid=72, filesize=5.5 K 2024-11-12T09:32:19,569 DEBUG [M:0;106923ea030f:33601 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/373f8ae507064ab18df7f5606edcbb11 as hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/373f8ae507064ab18df7f5606edcbb11 2024-11-12T09:32:19,578 INFO [M:0;106923ea030f:33601 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/373f8ae507064ab18df7f5606edcbb11, entries=8, sequenceid=72, filesize=6.3 K 2024-11-12T09:32:19,580 DEBUG [M:0;106923ea030f:33601 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/590374e876344eb7b12d32ddd1f35025 as hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/590374e876344eb7b12d32ddd1f35025 2024-11-12T09:32:19,589 INFO [M:0;106923ea030f:33601 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/590374e876344eb7b12d32ddd1f35025, entries=3, sequenceid=72, filesize=5.2 K 2024-11-12T09:32:19,591 INFO [M:0;106923ea030f:33601 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 150ms, sequenceid=72, compaction requested=false 2024-11-12T09:32:19,593 INFO [M:0;106923ea030f:33601 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
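The "Committing .../.tmp/... as .../info/..." and "Added ..., entries=..., sequenceid=..." lines above show the flush commit idiom: each column family is first flushed to a file under the region's .tmp directory and then renamed into its store directory. Below is a minimal sketch of that idiom against a plain Hadoop FileSystem, with made-up names; the real HRegionFileSystem commit path performs additional validation before and after the rename.

```java
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
  /**
   * Moves a finished .tmp store file into its final store directory, mirroring the
   * "Committing .tmp/... as .../info/..." messages in the log above.
   */
  static Path commit(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
    Path dst = new Path(storeDir, tmpFile.getName());
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("Failed to commit " + tmpFile + " to " + dst);
    }
    return dst;
  }
}
```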
2024-11-12T09:32:19,593 DEBUG [M:0;106923ea030f:33601 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731403939440Disabling compacts and flushes for region at 1731403939440Disabling writes for close at 1731403939441 (+1 ms)Obtaining lock to block concurrent updates at 1731403939441Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731403939441Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27480, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1731403939441Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731403939442 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731403939442Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731403939462 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731403939462Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731403939481 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731403939500 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731403939500Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731403939521 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731403939536 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731403939536Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6e019588: reopening flushed file at 1731403939555 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35dec195: reopening flushed file at 1731403939567 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@bf69097: reopening flushed file at 1731403939578 (+11 ms)Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 150ms, sequenceid=72, compaction requested=false at 1731403939591 (+13 ms)Writing region close event to WAL at 1731403939592 (+1 ms)Closed at 1731403939592 2024-11-12T09:32:19,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37913 is added to blk_1073741825_1011 (size=32683) 2024-11-12T09:32:19,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35337 is added to blk_1073741825_1011 (size=32683) 2024-11-12T09:32:19,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36449 is added to blk_1073741825_1011 (size=32683) 2024-11-12T09:32:19,597 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-12T09:32:19,598 INFO [M:0;106923ea030f:33601 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-12T09:32:19,598 INFO [M:0;106923ea030f:33601 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33601 2024-11-12T09:32:19,598 INFO [M:0;106923ea030f:33601 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T09:32:19,707 INFO [M:0;106923ea030f:33601 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T09:32:19,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T09:32:19,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33601-0x1012e6363050000, quorum=127.0.0.1:63479, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T09:32:19,758 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e59159d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T09:32:19,764 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T09:32:19,764 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T09:32:19,765 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T09:32:19,765 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/hadoop.log.dir/,STOPPED} 2024-11-12T09:32:19,769 WARN [BP-820054393-172.17.0.2-1731403929408 heartbeating to localhost/127.0.0.1:37231 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T09:32:19,769 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-12T09:32:19,770 WARN [BP-820054393-172.17.0.2-1731403929408 heartbeating to localhost/127.0.0.1:37231 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-820054393-172.17.0.2-1731403929408 (Datanode Uuid 87986298-ac6a-4a43-8071-71035f579b6e) service to localhost/127.0.0.1:37231 2024-11-12T09:32:19,770 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T09:32:19,771 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/cluster_47d7e28a-ae76-71a6-6eff-9980c6c1d183/data/data5/current/BP-820054393-172.17.0.2-1731403929408 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T09:32:19,771 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/cluster_47d7e28a-ae76-71a6-6eff-9980c6c1d183/data/data6/current/BP-820054393-172.17.0.2-1731403929408 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T09:32:19,771 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T09:32:19,773 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c6b8f01{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T09:32:19,774 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T09:32:19,774 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T09:32:19,774 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T09:32:19,774 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/hadoop.log.dir/,STOPPED} 2024-11-12T09:32:19,775 WARN [BP-820054393-172.17.0.2-1731403929408 heartbeating to localhost/127.0.0.1:37231 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T09:32:19,775 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-12T09:32:19,775 WARN [BP-820054393-172.17.0.2-1731403929408 heartbeating to localhost/127.0.0.1:37231 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-820054393-172.17.0.2-1731403929408 (Datanode Uuid 629a177e-a519-4a1a-8b7d-9ca92883d176) service to localhost/127.0.0.1:37231 2024-11-12T09:32:19,775 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T09:32:19,776 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/cluster_47d7e28a-ae76-71a6-6eff-9980c6c1d183/data/data3/current/BP-820054393-172.17.0.2-1731403929408 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T09:32:19,776 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/cluster_47d7e28a-ae76-71a6-6eff-9980c6c1d183/data/data4/current/BP-820054393-172.17.0.2-1731403929408 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T09:32:19,776 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T09:32:19,778 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4839957b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T09:32:19,778 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T09:32:19,778 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T09:32:19,779 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T09:32:19,779 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/hadoop.log.dir/,STOPPED} 2024-11-12T09:32:19,780 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-12T09:32:19,780 WARN [BP-820054393-172.17.0.2-1731403929408 heartbeating to localhost/127.0.0.1:37231 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T09:32:19,780 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T09:32:19,780 WARN [BP-820054393-172.17.0.2-1731403929408 heartbeating to localhost/127.0.0.1:37231 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-820054393-172.17.0.2-1731403929408 (Datanode Uuid abc8d647-d43c-40e0-9b32-952da22bd94b) service to localhost/127.0.0.1:37231 2024-11-12T09:32:19,781 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/cluster_47d7e28a-ae76-71a6-6eff-9980c6c1d183/data/data1/current/BP-820054393-172.17.0.2-1731403929408 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T09:32:19,781 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/cluster_47d7e28a-ae76-71a6-6eff-9980c6c1d183/data/data2/current/BP-820054393-172.17.0.2-1731403929408 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T09:32:19,781 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T09:32:19,789 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e4c45c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-12T09:32:19,790 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T09:32:19,790 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T09:32:19,790 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T09:32:19,790 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/hadoop.log.dir/,STOPPED} 2024-11-12T09:32:19,799 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-12T09:32:19,828 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-12T09:32:19,835 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=86 (was 157), OpenFileDescriptor=443 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=236 (was 222) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=7571 (was 7918) 2024-11-12T09:32:19,841 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=86, OpenFileDescriptor=443, MaxFileDescriptor=1048576, SystemLoadAverage=236, ProcessCount=11, AvailableMemoryMB=7571 2024-11-12T09:32:19,841 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-12T09:32:19,841 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/hadoop.log.dir so I do NOT create it in target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5 2024-11-12T09:32:19,841 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/06c0b62d-cd4f-2a16-a6a5-642890b1f1bf/hadoop.tmp.dir so I do NOT create it in target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5 2024-11-12T09:32:19,842 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/cluster_9183fb4f-07cc-ee27-d96f-59781f280d8b, deleteOnExit=true 2024-11-12T09:32:19,842 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-12T09:32:19,842 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/test.cache.data in system properties and HBase conf 2024-11-12T09:32:19,842 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/hadoop.tmp.dir in system properties and HBase conf 2024-11-12T09:32:19,842 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/hadoop.log.dir in system properties and HBase conf 2024-11-12T09:32:19,842 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-12T09:32:19,842 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-12T09:32:19,842 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-12T09:32:19,842 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-12T09:32:19,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-12T09:32:19,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-12T09:32:19,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-12T09:32:19,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T09:32:19,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-12T09:32:19,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-12T09:32:19,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T09:32:19,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T09:32:19,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-12T09:32:19,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/nfs.dump.dir in system properties and HBase conf 2024-11-12T09:32:19,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/java.io.tmpdir in system properties and HBase conf 2024-11-12T09:32:19,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T09:32:19,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-12T09:32:19,844 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-12T09:32:20,204 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T09:32:20,210 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T09:32:20,211 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T09:32:20,211 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T09:32:20,212 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T09:32:20,213 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T09:32:20,213 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a49b909{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/hadoop.log.dir/,AVAILABLE} 2024-11-12T09:32:20,214 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56aa9d3b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T09:32:20,310 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7aaeb6cf{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/java.io.tmpdir/jetty-localhost-38163-hadoop-hdfs-3_4_1-tests_jar-_-any-15272993437164936192/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-12T09:32:20,310 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@686c9dd5{HTTP/1.1, (http/1.1)}{localhost:38163} 2024-11-12T09:32:20,310 INFO [Time-limited test {}] server.Server(415): Started @12683ms 2024-11-12T09:32:20,564 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T09:32:20,568 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T09:32:20,569 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T09:32:20,569 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T09:32:20,569 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T09:32:20,570 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2807f8c2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/hadoop.log.dir/,AVAILABLE} 2024-11-12T09:32:20,570 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61a92fea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T09:32:20,663 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@38e5384{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/java.io.tmpdir/jetty-localhost-40069-hadoop-hdfs-3_4_1-tests_jar-_-any-16853752267921565318/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T09:32:20,663 
INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7d6118e0{HTTP/1.1, (http/1.1)}{localhost:40069} 2024-11-12T09:32:20,663 INFO [Time-limited test {}] server.Server(415): Started @13037ms 2024-11-12T09:32:20,665 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T09:32:20,698 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T09:32:20,703 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T09:32:20,704 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T09:32:20,704 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T09:32:20,704 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-12T09:32:20,705 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5b4297c4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/hadoop.log.dir/,AVAILABLE} 2024-11-12T09:32:20,705 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@bb1336{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T09:32:20,797 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6e5e4927{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/java.io.tmpdir/jetty-localhost-33349-hadoop-hdfs-3_4_1-tests_jar-_-any-2281823096279657221/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T09:32:20,798 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1768a8c1{HTTP/1.1, (http/1.1)}{localhost:33349} 2024-11-12T09:32:20,798 INFO [Time-limited test {}] server.Server(415): Started @13171ms 2024-11-12T09:32:20,799 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T09:32:20,854 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T09:32:20,858 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T09:32:20,858 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T09:32:20,859 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T09:32:20,859 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T09:32:20,859 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e0095f0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/hadoop.log.dir/,AVAILABLE} 2024-11-12T09:32:20,860 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@38da8210{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T09:32:20,956 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@bff0a43{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/java.io.tmpdir/jetty-localhost-41067-hadoop-hdfs-3_4_1-tests_jar-_-any-1444653246167477465/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T09:32:20,957 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@19dff04d{HTTP/1.1, (http/1.1)}{localhost:41067} 2024-11-12T09:32:20,957 INFO [Time-limited test {}] server.Server(415): Started @13330ms 2024-11-12T09:32:20,958 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T09:32:22,045 WARN [Thread-561 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/cluster_9183fb4f-07cc-ee27-d96f-59781f280d8b/data/data2/current/BP-110200569-172.17.0.2-1731403939869/current, will proceed with Du for space computation calculation, 2024-11-12T09:32:22,045 WARN [Thread-560 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/cluster_9183fb4f-07cc-ee27-d96f-59781f280d8b/data/data1/current/BP-110200569-172.17.0.2-1731403939869/current, will proceed with Du for space computation calculation, 2024-11-12T09:32:22,062 WARN [Thread-500 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-12T09:32:22,066 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xccadb24c445e145 with lease ID 0xfa5256a65c6f672d: Processing first storage report for DS-ce32df6b-78b1-423e-b90c-4e2ebd2d0ec4 from datanode DatanodeRegistration(127.0.0.1:43253, datanodeUuid=6ae5e573-fdff-478f-bc18-c2c087979327, infoPort=38463, infoSecurePort=0, ipcPort=33297, storageInfo=lv=-57;cid=testClusterID;nsid=430162754;c=1731403939869) 2024-11-12T09:32:22,066 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xccadb24c445e145 with lease ID 0xfa5256a65c6f672d: from storage DS-ce32df6b-78b1-423e-b90c-4e2ebd2d0ec4 node DatanodeRegistration(127.0.0.1:43253, datanodeUuid=6ae5e573-fdff-478f-bc18-c2c087979327, infoPort=38463, infoSecurePort=0, ipcPort=33297, storageInfo=lv=-57;cid=testClusterID;nsid=430162754;c=1731403939869), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T09:32:22,066 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xccadb24c445e145 with lease ID 0xfa5256a65c6f672d: Processing first storage report for DS-37f4b56f-95d8-4d59-8017-e9001539164f from datanode DatanodeRegistration(127.0.0.1:43253, datanodeUuid=6ae5e573-fdff-478f-bc18-c2c087979327, infoPort=38463, infoSecurePort=0, ipcPort=33297, storageInfo=lv=-57;cid=testClusterID;nsid=430162754;c=1731403939869) 2024-11-12T09:32:22,066 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xccadb24c445e145 with lease ID 0xfa5256a65c6f672d: from storage DS-37f4b56f-95d8-4d59-8017-e9001539164f node DatanodeRegistration(127.0.0.1:43253, datanodeUuid=6ae5e573-fdff-478f-bc18-c2c087979327, infoPort=38463, infoSecurePort=0, ipcPort=33297, storageInfo=lv=-57;cid=testClusterID;nsid=430162754;c=1731403939869), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T09:32:22,247 WARN [Thread-571 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/cluster_9183fb4f-07cc-ee27-d96f-59781f280d8b/data/data3/current/BP-110200569-172.17.0.2-1731403939869/current, will proceed with Du for space computation calculation, 2024-11-12T09:32:22,247 WARN [Thread-572 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/cluster_9183fb4f-07cc-ee27-d96f-59781f280d8b/data/data4/current/BP-110200569-172.17.0.2-1731403939869/current, will proceed with Du for space computation calculation, 2024-11-12T09:32:22,263 WARN [Thread-523 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-12T09:32:22,266 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5d53b94e80828fbc with lease ID 0xfa5256a65c6f672e: Processing first storage report for DS-f1d06727-5af9-40d7-bbf2-71eee39f0d79 from datanode DatanodeRegistration(127.0.0.1:36297, datanodeUuid=3baf23ec-a6ab-4dc5-8651-0287cc548d03, infoPort=38157, infoSecurePort=0, ipcPort=33171, storageInfo=lv=-57;cid=testClusterID;nsid=430162754;c=1731403939869) 2024-11-12T09:32:22,267 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5d53b94e80828fbc with lease ID 0xfa5256a65c6f672e: from storage DS-f1d06727-5af9-40d7-bbf2-71eee39f0d79 node DatanodeRegistration(127.0.0.1:36297, datanodeUuid=3baf23ec-a6ab-4dc5-8651-0287cc548d03, infoPort=38157, infoSecurePort=0, ipcPort=33171, storageInfo=lv=-57;cid=testClusterID;nsid=430162754;c=1731403939869), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T09:32:22,267 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5d53b94e80828fbc with lease ID 0xfa5256a65c6f672e: Processing first storage report for DS-5e5f5ea8-a700-4053-bb92-5a6b45f9fa3a from datanode DatanodeRegistration(127.0.0.1:36297, datanodeUuid=3baf23ec-a6ab-4dc5-8651-0287cc548d03, infoPort=38157, infoSecurePort=0, ipcPort=33171, storageInfo=lv=-57;cid=testClusterID;nsid=430162754;c=1731403939869) 2024-11-12T09:32:22,267 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5d53b94e80828fbc with lease ID 0xfa5256a65c6f672e: from storage DS-5e5f5ea8-a700-4053-bb92-5a6b45f9fa3a node DatanodeRegistration(127.0.0.1:36297, datanodeUuid=3baf23ec-a6ab-4dc5-8651-0287cc548d03, infoPort=38157, infoSecurePort=0, ipcPort=33171, storageInfo=lv=-57;cid=testClusterID;nsid=430162754;c=1731403939869), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T09:32:22,328 WARN [Thread-582 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/cluster_9183fb4f-07cc-ee27-d96f-59781f280d8b/data/data5/current/BP-110200569-172.17.0.2-1731403939869/current, will proceed with Du for space computation calculation, 2024-11-12T09:32:22,328 WARN [Thread-583 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/cluster_9183fb4f-07cc-ee27-d96f-59781f280d8b/data/data6/current/BP-110200569-172.17.0.2-1731403939869/current, will proceed with Du for space computation calculation, 2024-11-12T09:32:22,343 WARN [Thread-545 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-12T09:32:22,347 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x570bd023c8495c70 with lease ID 0xfa5256a65c6f672f: Processing first storage report for DS-4ab5aefb-2a17-4c5e-b212-3501b17c2424 from datanode DatanodeRegistration(127.0.0.1:33811, datanodeUuid=6922bd40-6891-44b2-9122-ee9897812a10, infoPort=46115, infoSecurePort=0, ipcPort=35465, storageInfo=lv=-57;cid=testClusterID;nsid=430162754;c=1731403939869) 2024-11-12T09:32:22,347 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x570bd023c8495c70 with lease ID 0xfa5256a65c6f672f: from storage DS-4ab5aefb-2a17-4c5e-b212-3501b17c2424 node DatanodeRegistration(127.0.0.1:33811, datanodeUuid=6922bd40-6891-44b2-9122-ee9897812a10, infoPort=46115, infoSecurePort=0, ipcPort=35465, storageInfo=lv=-57;cid=testClusterID;nsid=430162754;c=1731403939869), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T09:32:22,347 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x570bd023c8495c70 with lease ID 0xfa5256a65c6f672f: Processing first storage report for DS-418bb625-23c1-4b44-bb5a-e72ba554f210 from datanode DatanodeRegistration(127.0.0.1:33811, datanodeUuid=6922bd40-6891-44b2-9122-ee9897812a10, infoPort=46115, infoSecurePort=0, ipcPort=35465, storageInfo=lv=-57;cid=testClusterID;nsid=430162754;c=1731403939869) 2024-11-12T09:32:22,347 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x570bd023c8495c70 with lease ID 0xfa5256a65c6f672f: from storage DS-418bb625-23c1-4b44-bb5a-e72ba554f210 node DatanodeRegistration(127.0.0.1:33811, datanodeUuid=6922bd40-6891-44b2-9122-ee9897812a10, infoPort=46115, infoSecurePort=0, ipcPort=35465, storageInfo=lv=-57;cid=testClusterID;nsid=430162754;c=1731403939869), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T09:32:22,400 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5 2024-11-12T09:32:22,403 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/cluster_9183fb4f-07cc-ee27-d96f-59781f280d8b/zookeeper_0, clientPort=49717, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/cluster_9183fb4f-07cc-ee27-d96f-59781f280d8b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/cluster_9183fb4f-07cc-ee27-d96f-59781f280d8b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-12T09:32:22,404 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49717 2024-11-12T09:32:22,405 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
2024-11-12T09:32:22,407 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T09:32:22,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741825_1001 (size=7) 2024-11-12T09:32:22,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43253 is added to blk_1073741825_1001 (size=7) 2024-11-12T09:32:22,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36297 is added to blk_1073741825_1001 (size=7) 2024-11-12T09:32:22,424 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f with version=8 2024-11-12T09:32:22,424 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37231/user/jenkins/test-data/2d377198-c498-6f29-3949-ffd69f143f68/hbase-staging 2024-11-12T09:32:22,426 INFO [Time-limited test {}] client.ConnectionUtils(128): master/106923ea030f:0 server-side Connection retries=45 2024-11-12T09:32:22,426 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T09:32:22,426 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T09:32:22,426 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T09:32:22,426 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T09:32:22,426 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T09:32:22,427 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-12T09:32:22,427 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T09:32:22,427 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45249 2024-11-12T09:32:22,429 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45249 connecting to ZooKeeper ensemble=127.0.0.1:49717 2024-11-12T09:32:22,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:452490x0, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T09:32:22,496 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45249-0x1012e6385ae0000 connected 2024-11-12T09:32:22,576 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to 
namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T09:32:22,580 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T09:32:22,583 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T09:32:22,584 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f, hbase.cluster.distributed=false 2024-11-12T09:32:22,587 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T09:32:22,587 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45249 2024-11-12T09:32:22,588 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45249 2024-11-12T09:32:22,588 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45249 2024-11-12T09:32:22,589 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45249 2024-11-12T09:32:22,589 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45249 2024-11-12T09:32:22,609 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/106923ea030f:0 server-side Connection retries=45 2024-11-12T09:32:22,609 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T09:32:22,609 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T09:32:22,609 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T09:32:22,609 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T09:32:22,609 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T09:32:22,609 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T09:32:22,609 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T09:32:22,610 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36723 
2024-11-12T09:32:22,611 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36723 connecting to ZooKeeper ensemble=127.0.0.1:49717 2024-11-12T09:32:22,612 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T09:32:22,614 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T09:32:22,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:367230x0, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T09:32:22,628 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36723-0x1012e6385ae0001, quorum=127.0.0.1:49717, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T09:32:22,628 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36723-0x1012e6385ae0001 connected 2024-11-12T09:32:22,628 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-12T09:32:22,629 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-12T09:32:22,630 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36723-0x1012e6385ae0001, quorum=127.0.0.1:49717, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-12T09:32:22,632 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36723-0x1012e6385ae0001, quorum=127.0.0.1:49717, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T09:32:22,632 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36723 2024-11-12T09:32:22,632 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36723 2024-11-12T09:32:22,633 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36723 2024-11-12T09:32:22,633 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36723 2024-11-12T09:32:22,633 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36723 2024-11-12T09:32:22,651 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/106923ea030f:0 server-side Connection retries=45 2024-11-12T09:32:22,651 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T09:32:22,651 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T09:32:22,651 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T09:32:22,651 INFO 
[Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T09:32:22,652 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T09:32:22,652 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T09:32:22,652 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T09:32:22,652 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45641 2024-11-12T09:32:22,654 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45641 connecting to ZooKeeper ensemble=127.0.0.1:49717 2024-11-12T09:32:22,654 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T09:32:22,656 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T09:32:22,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:456410x0, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T09:32:22,670 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45641-0x1012e6385ae0002 connected 2024-11-12T09:32:22,670 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45641-0x1012e6385ae0002, quorum=127.0.0.1:49717, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T09:32:22,670 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-12T09:32:22,671 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-12T09:32:22,672 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45641-0x1012e6385ae0002, quorum=127.0.0.1:49717, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-12T09:32:22,673 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45641-0x1012e6385ae0002, quorum=127.0.0.1:49717, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T09:32:22,673 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45641 2024-11-12T09:32:22,674 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45641 2024-11-12T09:32:22,674 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45641 2024-11-12T09:32:22,674 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45641 2024-11-12T09:32:22,675 DEBUG 
[Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45641 2024-11-12T09:32:22,693 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/106923ea030f:0 server-side Connection retries=45 2024-11-12T09:32:22,693 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T09:32:22,693 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T09:32:22,693 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T09:32:22,693 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T09:32:22,693 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T09:32:22,693 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T09:32:22,693 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T09:32:22,694 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37117 2024-11-12T09:32:22,696 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37117 connecting to ZooKeeper ensemble=127.0.0.1:49717 2024-11-12T09:32:22,697 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T09:32:22,699 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T09:32:22,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:371170x0, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T09:32:22,712 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37117-0x1012e6385ae0003 connected 2024-11-12T09:32:22,712 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37117-0x1012e6385ae0003, quorum=127.0.0.1:49717, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T09:32:22,712 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-12T09:32:22,713 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-12T09:32:22,714 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37117-0x1012e6385ae0003, quorum=127.0.0.1:49717, baseZNode=/hbase Set watcher on znode that does not yet 
exist, /hbase/master 2024-11-12T09:32:22,715 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37117-0x1012e6385ae0003, quorum=127.0.0.1:49717, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T09:32:22,716 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37117 2024-11-12T09:32:22,716 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37117 2024-11-12T09:32:22,717 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37117 2024-11-12T09:32:22,722 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37117 2024-11-12T09:32:22,722 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37117 2024-11-12T09:32:22,733 DEBUG [M:0;106923ea030f:45249 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;106923ea030f:45249 2024-11-12T09:32:22,733 INFO [master/106923ea030f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/106923ea030f,45249,1731403942426 2024-11-12T09:32:22,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45641-0x1012e6385ae0002, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T09:32:22,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37117-0x1012e6385ae0003, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T09:32:22,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T09:32:22,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36723-0x1012e6385ae0001, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T09:32:22,744 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/106923ea030f,45249,1731403942426 2024-11-12T09:32:22,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45641-0x1012e6385ae0002, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T09:32:22,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36723-0x1012e6385ae0001, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T09:32:22,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:22,754 DEBUG [Time-limited test-EventThread 
{}] zookeeper.ZKWatcher(609): regionserver:45641-0x1012e6385ae0002, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:22,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37117-0x1012e6385ae0003, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T09:32:22,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36723-0x1012e6385ae0001, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:22,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37117-0x1012e6385ae0003, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:22,755 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-12T09:32:22,756 INFO [master/106923ea030f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/106923ea030f,45249,1731403942426 from backup master directory 2024-11-12T09:32:22,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45641-0x1012e6385ae0002, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T09:32:22,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/106923ea030f,45249,1731403942426 2024-11-12T09:32:22,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36723-0x1012e6385ae0001, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T09:32:22,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37117-0x1012e6385ae0003, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T09:32:22,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T09:32:22,765 WARN [master/106923ea030f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-12T09:32:22,765 INFO [master/106923ea030f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=106923ea030f,45249,1731403942426 2024-11-12T09:32:22,774 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/hbase.id] with ID: f0638e33-2dcf-4900-a0ac-d62cc2d64b55 2024-11-12T09:32:22,774 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/.tmp/hbase.id 2024-11-12T09:32:22,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36297 is added to blk_1073741826_1002 (size=42) 2024-11-12T09:32:22,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43253 is added to blk_1073741826_1002 (size=42) 2024-11-12T09:32:22,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741826_1002 (size=42) 2024-11-12T09:32:22,785 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/.tmp/hbase.id]:[hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/hbase.id] 2024-11-12T09:32:22,802 INFO [master/106923ea030f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T09:32:22,802 INFO [master/106923ea030f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-12T09:32:22,804 INFO [master/106923ea030f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-12T09:32:22,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37117-0x1012e6385ae0003, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:22,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:22,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45641-0x1012e6385ae0002, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:22,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36723-0x1012e6385ae0001, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:22,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43253 is added to blk_1073741827_1003 (size=196) 2024-11-12T09:32:22,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741827_1003 (size=196) 2024-11-12T09:32:22,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36297 is added to blk_1073741827_1003 (size=196) 2024-11-12T09:32:22,828 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-12T09:32:22,830 INFO [master/106923ea030f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T09:32:22,831 INFO [master/106923ea030f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-12T09:32:22,831 INFO [master/106923ea030f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class 
org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T09:32:22,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36297 is added to blk_1073741828_1004 (size=1189) 2024-11-12T09:32:22,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741828_1004 (size=1189) 2024-11-12T09:32:22,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43253 is added to blk_1073741828_1004 (size=1189) 2024-11-12T09:32:22,846 INFO [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/MasterData/data/master/store 2024-11-12T09:32:22,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741829_1005 (size=34) 2024-11-12T09:32:22,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43253 is added to blk_1073741829_1005 (size=34) 2024-11-12T09:32:22,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36297 is added to blk_1073741829_1005 (size=34) 2024-11-12T09:32:22,860 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T09:32:22,860 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T09:32:22,860 INFO [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-12T09:32:22,860 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T09:32:22,860 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-12T09:32:22,860 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T09:32:22,860 INFO [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T09:32:22,860 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731403942860Disabling compacts and flushes for region at 1731403942860Disabling writes for close at 1731403942860Writing region close event to WAL at 1731403942860Closed at 1731403942860 2024-11-12T09:32:22,861 WARN [master/106923ea030f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/MasterData/data/master/store/.initializing 2024-11-12T09:32:22,862 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/MasterData/WALs/106923ea030f,45249,1731403942426 2024-11-12T09:32:22,865 INFO [master/106923ea030f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=106923ea030f%2C45249%2C1731403942426, suffix=, logDir=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/MasterData/WALs/106923ea030f,45249,1731403942426, archiveDir=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/MasterData/oldWALs, maxLogs=10 2024-11-12T09:32:22,866 INFO [master/106923ea030f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 106923ea030f%2C45249%2C1731403942426.1731403942866 2024-11-12T09:32:22,876 INFO [master/106923ea030f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/MasterData/WALs/106923ea030f,45249,1731403942426/106923ea030f%2C45249%2C1731403942426.1731403942866 2024-11-12T09:32:22,879 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38157:38157),(127.0.0.1/127.0.0.1:38463:38463),(127.0.0.1/127.0.0.1:46115:46115)] 2024-11-12T09:32:22,879 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-12T09:32:22,880 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T09:32:22,880 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T09:32:22,880 DEBUG 
[master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T09:32:22,882 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T09:32:22,884 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-12T09:32:22,884 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:22,885 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T09:32:22,885 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T09:32:22,887 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-12T09:32:22,887 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:22,888 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 
2024-11-12T09:32:22,888 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T09:32:22,891 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-12T09:32:22,891 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:22,892 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T09:32:22,892 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T09:32:22,894 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-12T09:32:22,894 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:22,894 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T09:32:22,895 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 
2024-11-12T09:32:22,896 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T09:32:22,896 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T09:32:22,898 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T09:32:22,898 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T09:32:22,899 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T09:32:22,899 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T09:32:22,899 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T09:32:22,901 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T09:32:22,903 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T09:32:22,904 INFO [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61090796, jitterRate=-0.08967620134353638}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T09:32:22,906 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731403942880Initializing all the Stores at 1731403942881 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731403942881Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731403942882 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731403942882Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731403942882Cleaning up temporary data from old regions at 1731403942899 (+17 ms)Region opened successfully at 1731403942905 (+6 ms) 2024-11-12T09:32:22,906 INFO [master/106923ea030f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-12T09:32:22,911 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ef05d68, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=106923ea030f/172.17.0.2:0 2024-11-12T09:32:22,912 INFO [master/106923ea030f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-12T09:32:22,912 INFO [master/106923ea030f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-12T09:32:22,912 INFO [master/106923ea030f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-12T09:32:22,913 INFO [master/106923ea030f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-12T09:32:22,913 INFO [master/106923ea030f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-12T09:32:22,914 INFO [master/106923ea030f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-12T09:32:22,914 INFO [master/106923ea030f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-12T09:32:22,919 INFO [master/106923ea030f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-11-12T09:32:22,920 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-12T09:32:22,984 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-12T09:32:22,985 INFO [master/106923ea030f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-12T09:32:22,986 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-12T09:32:23,028 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-12T09:32:23,029 INFO [master/106923ea030f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-12T09:32:23,032 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-12T09:32:23,045 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-12T09:32:23,048 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-12T09:32:23,059 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-12T09:32:23,061 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-12T09:32:23,069 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-12T09:32:23,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45641-0x1012e6385ae0002, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T09:32:23,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36723-0x1012e6385ae0001, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T09:32:23,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37117-0x1012e6385ae0003, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T09:32:23,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-11-12T09:32:23,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37117-0x1012e6385ae0003, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:23,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:23,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45641-0x1012e6385ae0002, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:23,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36723-0x1012e6385ae0001, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:23,081 INFO [master/106923ea030f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=106923ea030f,45249,1731403942426, sessionid=0x1012e6385ae0000, setting cluster-up flag (Was=false) 2024-11-12T09:32:23,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36723-0x1012e6385ae0001, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:23,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:23,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37117-0x1012e6385ae0003, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:23,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45641-0x1012e6385ae0002, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:23,133 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-12T09:32:23,135 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=106923ea030f,45249,1731403942426 2024-11-12T09:32:23,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:23,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45641-0x1012e6385ae0002, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:23,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36723-0x1012e6385ae0001, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:23,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:37117-0x1012e6385ae0003, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:23,185 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-12T09:32:23,187 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=106923ea030f,45249,1731403942426 2024-11-12T09:32:23,189 INFO [master/106923ea030f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-12T09:32:23,193 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-12T09:32:23,193 INFO [master/106923ea030f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-12T09:32:23,193 INFO [master/106923ea030f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-12T09:32:23,194 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 106923ea030f,45249,1731403942426 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-12T09:32:23,196 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/106923ea030f:0, corePoolSize=5, maxPoolSize=5 2024-11-12T09:32:23,196 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/106923ea030f:0, corePoolSize=5, maxPoolSize=5 2024-11-12T09:32:23,196 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/106923ea030f:0, corePoolSize=5, maxPoolSize=5 2024-11-12T09:32:23,197 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/106923ea030f:0, corePoolSize=5, maxPoolSize=5 2024-11-12T09:32:23,197 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/106923ea030f:0, corePoolSize=10, maxPoolSize=10 2024-11-12T09:32:23,197 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,197 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/106923ea030f:0, corePoolSize=2, maxPoolSize=2 2024-11-12T09:32:23,197 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,199 INFO [master/106923ea030f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731403973199 2024-11-12T09:32:23,199 INFO [master/106923ea030f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-12T09:32:23,199 INFO [master/106923ea030f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-12T09:32:23,200 INFO [master/106923ea030f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-12T09:32:23,200 INFO [master/106923ea030f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-12T09:32:23,200 INFO [master/106923ea030f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-12T09:32:23,200 INFO [master/106923ea030f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-12T09:32:23,200 INFO [master/106923ea030f:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,200 INFO [master/106923ea030f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-12T09:32:23,200 INFO [master/106923ea030f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-12T09:32:23,200 INFO [master/106923ea030f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-12T09:32:23,201 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T09:32:23,201 INFO [master/106923ea030f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-12T09:32:23,201 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-12T09:32:23,201 INFO [master/106923ea030f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-12T09:32:23,202 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:23,202 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-12T09:32:23,204 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/106923ea030f:0:becomeActiveMaster-HFileCleaner.large.0-1731403943201,5,FailOnTimeoutGroup] 2024-11-12T09:32:23,204 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/106923ea030f:0:becomeActiveMaster-HFileCleaner.small.0-1731403943204,5,FailOnTimeoutGroup] 2024-11-12T09:32:23,204 INFO [master/106923ea030f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,204 INFO [master/106923ea030f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-12T09:32:23,204 INFO [master/106923ea030f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,205 INFO [master/106923ea030f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741831_1007 (size=1321) 2024-11-12T09:32:23,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36297 is added to blk_1073741831_1007 (size=1321) 2024-11-12T09:32:23,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43253 is added to blk_1073741831_1007 (size=1321) 2024-11-12T09:32:23,214 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-12T09:32:23,215 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f 2024-11-12T09:32:23,226 INFO [RS:0;106923ea030f:36723 
{}] regionserver.HRegionServer(746): ClusterId : f0638e33-2dcf-4900-a0ac-d62cc2d64b55 2024-11-12T09:32:23,226 DEBUG [RS:0;106923ea030f:36723 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T09:32:23,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36297 is added to blk_1073741832_1008 (size=32) 2024-11-12T09:32:23,226 INFO [RS:1;106923ea030f:45641 {}] regionserver.HRegionServer(746): ClusterId : f0638e33-2dcf-4900-a0ac-d62cc2d64b55 2024-11-12T09:32:23,227 DEBUG [RS:1;106923ea030f:45641 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T09:32:23,227 INFO [RS:2;106923ea030f:37117 {}] regionserver.HRegionServer(746): ClusterId : f0638e33-2dcf-4900-a0ac-d62cc2d64b55 2024-11-12T09:32:23,227 DEBUG [RS:2;106923ea030f:37117 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T09:32:23,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43253 is added to blk_1073741832_1008 (size=32) 2024-11-12T09:32:23,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741832_1008 (size=32) 2024-11-12T09:32:23,228 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T09:32:23,230 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T09:32:23,231 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T09:32:23,231 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:23,232 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T09:32:23,232 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-12T09:32:23,234 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-12T09:32:23,234 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:23,234 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T09:32:23,234 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T09:32:23,236 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T09:32:23,236 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:23,237 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T09:32:23,237 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T09:32:23,238 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-12T09:32:23,238 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:23,239 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T09:32:23,239 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T09:32:23,240 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/hbase/meta/1588230740 2024-11-12T09:32:23,240 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/hbase/meta/1588230740 2024-11-12T09:32:23,242 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T09:32:23,242 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T09:32:23,243 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T09:32:23,244 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T09:32:23,246 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T09:32:23,247 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71724634, jitterRate=0.0687803328037262}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T09:32:23,248 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731403943228Initializing all the Stores at 1731403943229 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731403943229Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731403943229Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731403943230 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731403943230Cleaning up temporary data from old regions at 1731403943242 (+12 ms)Region opened successfully at 1731403943248 (+6 ms) 2024-11-12T09:32:23,248 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-12T09:32:23,248 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-12T09:32:23,248 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-12T09:32:23,248 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T09:32:23,248 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T09:32:23,249 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-12T09:32:23,249 DEBUG [RS:0;106923ea030f:36723 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T09:32:23,249 DEBUG [RS:0;106923ea030f:36723 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T09:32:23,249 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731403943248Disabling compacts and flushes for region at 1731403943248Disabling writes for close at 1731403943248Writing region close event to WAL at 1731403943249 (+1 ms)Closed at 1731403943249 2024-11-12T09:32:23,249 DEBUG [RS:2;106923ea030f:37117 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T09:32:23,249 DEBUG [RS:2;106923ea030f:37117 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T09:32:23,249 DEBUG [RS:1;106923ea030f:45641 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T09:32:23,249 DEBUG [RS:1;106923ea030f:45641 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T09:32:23,251 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T09:32:23,251 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-12T09:32:23,251 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-12T09:32:23,253 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-12T09:32:23,254 
INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-12T09:32:23,270 DEBUG [RS:0;106923ea030f:36723 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T09:32:23,270 DEBUG [RS:2;106923ea030f:37117 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T09:32:23,270 DEBUG [RS:1;106923ea030f:45641 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T09:32:23,270 DEBUG [RS:0;106923ea030f:36723 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34bed4d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=106923ea030f/172.17.0.2:0 2024-11-12T09:32:23,270 DEBUG [RS:1;106923ea030f:45641 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8ee8e8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=106923ea030f/172.17.0.2:0 2024-11-12T09:32:23,271 DEBUG [RS:2;106923ea030f:37117 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a0c0847, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=106923ea030f/172.17.0.2:0 2024-11-12T09:32:23,281 DEBUG [RS:2;106923ea030f:37117 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;106923ea030f:37117 2024-11-12T09:32:23,281 DEBUG [RS:1;106923ea030f:45641 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;106923ea030f:45641 2024-11-12T09:32:23,281 DEBUG [RS:0;106923ea030f:36723 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;106923ea030f:36723 2024-11-12T09:32:23,282 INFO [RS:2;106923ea030f:37117 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-12T09:32:23,282 INFO [RS:2;106923ea030f:37117 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-12T09:32:23,282 INFO [RS:0;106923ea030f:36723 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-12T09:32:23,282 DEBUG [RS:2;106923ea030f:37117 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-12T09:32:23,282 INFO [RS:0;106923ea030f:36723 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-12T09:32:23,282 INFO [RS:1;106923ea030f:45641 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-12T09:32:23,282 DEBUG [RS:0;106923ea030f:36723 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-12T09:32:23,282 INFO [RS:1;106923ea030f:45641 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-12T09:32:23,282 DEBUG [RS:1;106923ea030f:45641 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-12T09:32:23,283 INFO [RS:1;106923ea030f:45641 {}] regionserver.HRegionServer(2659): reportForDuty to master=106923ea030f,45249,1731403942426 with port=45641, startcode=1731403942651 2024-11-12T09:32:23,283 INFO [RS:2;106923ea030f:37117 {}] regionserver.HRegionServer(2659): reportForDuty to master=106923ea030f,45249,1731403942426 with port=37117, startcode=1731403942692 2024-11-12T09:32:23,283 INFO [RS:0;106923ea030f:36723 {}] regionserver.HRegionServer(2659): reportForDuty to master=106923ea030f,45249,1731403942426 with port=36723, startcode=1731403942608 2024-11-12T09:32:23,283 DEBUG [RS:1;106923ea030f:45641 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T09:32:23,283 DEBUG [RS:2;106923ea030f:37117 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T09:32:23,283 DEBUG [RS:0;106923ea030f:36723 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T09:32:23,285 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35327, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T09:32:23,285 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57191, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T09:32:23,285 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45737, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T09:32:23,285 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45249 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 106923ea030f,37117,1731403942692 2024-11-12T09:32:23,285 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45249 {}] master.ServerManager(517): Registering regionserver=106923ea030f,37117,1731403942692 2024-11-12T09:32:23,288 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45249 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 106923ea030f,45641,1731403942651 2024-11-12T09:32:23,288 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45249 {}] master.ServerManager(517): Registering regionserver=106923ea030f,45641,1731403942651 2024-11-12T09:32:23,288 DEBUG [RS:2;106923ea030f:37117 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f 2024-11-12T09:32:23,288 DEBUG [RS:2;106923ea030f:37117 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35775 2024-11-12T09:32:23,288 DEBUG [RS:2;106923ea030f:37117 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-12T09:32:23,289 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45249 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 106923ea030f,36723,1731403942608 2024-11-12T09:32:23,289 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45249 {}] master.ServerManager(517): Registering regionserver=106923ea030f,36723,1731403942608 2024-11-12T09:32:23,290 DEBUG [RS:1;106923ea030f:45641 {}] regionserver.HRegionServer(1440): Config from master: 
hbase.rootdir=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f 2024-11-12T09:32:23,290 DEBUG [RS:1;106923ea030f:45641 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35775 2024-11-12T09:32:23,290 DEBUG [RS:1;106923ea030f:45641 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-12T09:32:23,291 DEBUG [RS:0;106923ea030f:36723 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f 2024-11-12T09:32:23,291 DEBUG [RS:0;106923ea030f:36723 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35775 2024-11-12T09:32:23,291 DEBUG [RS:0;106923ea030f:36723 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-12T09:32:23,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T09:32:23,340 DEBUG [RS:2;106923ea030f:37117 {}] zookeeper.ZKUtil(111): regionserver:37117-0x1012e6385ae0003, quorum=127.0.0.1:49717, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/106923ea030f,37117,1731403942692 2024-11-12T09:32:23,340 WARN [RS:2;106923ea030f:37117 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-12T09:32:23,340 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [106923ea030f,45641,1731403942651] 2024-11-12T09:32:23,340 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [106923ea030f,37117,1731403942692] 2024-11-12T09:32:23,340 INFO [RS:2;106923ea030f:37117 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T09:32:23,340 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [106923ea030f,36723,1731403942608] 2024-11-12T09:32:23,340 DEBUG [RS:1;106923ea030f:45641 {}] zookeeper.ZKUtil(111): regionserver:45641-0x1012e6385ae0002, quorum=127.0.0.1:49717, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/106923ea030f,45641,1731403942651 2024-11-12T09:32:23,340 DEBUG [RS:2;106923ea030f:37117 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/WALs/106923ea030f,37117,1731403942692 2024-11-12T09:32:23,340 DEBUG [RS:0;106923ea030f:36723 {}] zookeeper.ZKUtil(111): regionserver:36723-0x1012e6385ae0001, quorum=127.0.0.1:49717, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/106923ea030f,36723,1731403942608 2024-11-12T09:32:23,341 WARN [RS:1;106923ea030f:45641 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-12T09:32:23,341 WARN [RS:0;106923ea030f:36723 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-12T09:32:23,341 INFO [RS:1;106923ea030f:45641 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T09:32:23,341 INFO [RS:0;106923ea030f:36723 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T09:32:23,341 DEBUG [RS:1;106923ea030f:45641 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/WALs/106923ea030f,45641,1731403942651 2024-11-12T09:32:23,341 DEBUG [RS:0;106923ea030f:36723 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/WALs/106923ea030f,36723,1731403942608 2024-11-12T09:32:23,348 INFO [RS:1;106923ea030f:45641 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T09:32:23,348 INFO [RS:2;106923ea030f:37117 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T09:32:23,348 INFO [RS:0;106923ea030f:36723 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T09:32:23,351 INFO [RS:1;106923ea030f:45641 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T09:32:23,352 INFO [RS:1;106923ea030f:45641 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T09:32:23,352 INFO [RS:1;106923ea030f:45641 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,352 INFO [RS:1;106923ea030f:45641 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T09:32:23,353 INFO [RS:1;106923ea030f:45641 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T09:32:23,353 INFO [RS:1;106923ea030f:45641 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-12T09:32:23,353 DEBUG [RS:1;106923ea030f:45641 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,353 DEBUG [RS:1;106923ea030f:45641 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,354 DEBUG [RS:1;106923ea030f:45641 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,354 DEBUG [RS:1;106923ea030f:45641 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,354 DEBUG [RS:1;106923ea030f:45641 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,354 DEBUG [RS:1;106923ea030f:45641 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/106923ea030f:0, corePoolSize=2, maxPoolSize=2 2024-11-12T09:32:23,354 DEBUG [RS:1;106923ea030f:45641 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,354 DEBUG [RS:1;106923ea030f:45641 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,354 DEBUG [RS:1;106923ea030f:45641 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,354 DEBUG [RS:1;106923ea030f:45641 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,354 DEBUG [RS:1;106923ea030f:45641 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,354 DEBUG [RS:1;106923ea030f:45641 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,354 DEBUG [RS:1;106923ea030f:45641 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/106923ea030f:0, corePoolSize=3, maxPoolSize=3 2024-11-12T09:32:23,354 DEBUG [RS:1;106923ea030f:45641 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0, corePoolSize=3, maxPoolSize=3 2024-11-12T09:32:23,356 INFO [RS:2;106923ea030f:37117 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T09:32:23,356 INFO [RS:1;106923ea030f:45641 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,356 INFO [RS:1;106923ea030f:45641 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,357 INFO [RS:1;106923ea030f:45641 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 
2024-11-12T09:32:23,357 INFO [RS:1;106923ea030f:45641 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,357 INFO [RS:2;106923ea030f:37117 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T09:32:23,357 INFO [RS:1;106923ea030f:45641 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,357 INFO [RS:1;106923ea030f:45641 {}] hbase.ChoreService(168): Chore ScheduledChore name=106923ea030f,45641,1731403942651-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T09:32:23,357 INFO [RS:2;106923ea030f:37117 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,357 INFO [RS:0;106923ea030f:36723 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T09:32:23,360 INFO [RS:2;106923ea030f:37117 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T09:32:23,360 INFO [RS:0;106923ea030f:36723 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T09:32:23,360 INFO [RS:0;106923ea030f:36723 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,361 INFO [RS:0;106923ea030f:36723 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T09:32:23,361 INFO [RS:2;106923ea030f:37117 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T09:32:23,362 INFO [RS:2;106923ea030f:37117 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,362 DEBUG [RS:2;106923ea030f:37117 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,362 DEBUG [RS:2;106923ea030f:37117 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,362 DEBUG [RS:2;106923ea030f:37117 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,362 INFO [RS:0;106923ea030f:36723 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T09:32:23,362 DEBUG [RS:2;106923ea030f:37117 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,362 DEBUG [RS:2;106923ea030f:37117 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,362 INFO [RS:0;106923ea030f:36723 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-12T09:32:23,362 DEBUG [RS:2;106923ea030f:37117 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/106923ea030f:0, corePoolSize=2, maxPoolSize=2 2024-11-12T09:32:23,362 DEBUG [RS:2;106923ea030f:37117 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,362 DEBUG [RS:0;106923ea030f:36723 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,362 DEBUG [RS:2;106923ea030f:37117 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,362 DEBUG [RS:0;106923ea030f:36723 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,362 DEBUG [RS:2;106923ea030f:37117 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,362 DEBUG [RS:2;106923ea030f:37117 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,362 DEBUG [RS:0;106923ea030f:36723 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,362 DEBUG [RS:2;106923ea030f:37117 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,362 DEBUG [RS:0;106923ea030f:36723 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,362 DEBUG [RS:2;106923ea030f:37117 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,362 DEBUG [RS:2;106923ea030f:37117 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/106923ea030f:0, corePoolSize=3, maxPoolSize=3 2024-11-12T09:32:23,362 DEBUG [RS:0;106923ea030f:36723 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,363 DEBUG [RS:2;106923ea030f:37117 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0, corePoolSize=3, maxPoolSize=3 2024-11-12T09:32:23,363 DEBUG [RS:0;106923ea030f:36723 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/106923ea030f:0, corePoolSize=2, maxPoolSize=2 2024-11-12T09:32:23,363 DEBUG [RS:0;106923ea030f:36723 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,363 DEBUG [RS:0;106923ea030f:36723 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,363 DEBUG [RS:0;106923ea030f:36723 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REFRESH_PEER-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,363 DEBUG [RS:0;106923ea030f:36723 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,363 INFO [RS:2;106923ea030f:37117 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,363 DEBUG [RS:0;106923ea030f:36723 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,363 INFO [RS:2;106923ea030f:37117 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,363 DEBUG [RS:0;106923ea030f:36723 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/106923ea030f:0, corePoolSize=1, maxPoolSize=1 2024-11-12T09:32:23,363 INFO [RS:2;106923ea030f:37117 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,363 INFO [RS:2;106923ea030f:37117 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,363 DEBUG [RS:0;106923ea030f:36723 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/106923ea030f:0, corePoolSize=3, maxPoolSize=3 2024-11-12T09:32:23,363 INFO [RS:2;106923ea030f:37117 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,363 DEBUG [RS:0;106923ea030f:36723 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0, corePoolSize=3, maxPoolSize=3 2024-11-12T09:32:23,363 INFO [RS:2;106923ea030f:37117 {}] hbase.ChoreService(168): Chore ScheduledChore name=106923ea030f,37117,1731403942692-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T09:32:23,365 INFO [RS:0;106923ea030f:36723 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,365 INFO [RS:0;106923ea030f:36723 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,365 INFO [RS:0;106923ea030f:36723 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,365 INFO [RS:0;106923ea030f:36723 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,365 INFO [RS:0;106923ea030f:36723 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,365 INFO [RS:0;106923ea030f:36723 {}] hbase.ChoreService(168): Chore ScheduledChore name=106923ea030f,36723,1731403942608-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-11-12T09:32:23,379 INFO [RS:1;106923ea030f:45641 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T09:32:23,379 INFO [RS:1;106923ea030f:45641 {}] hbase.ChoreService(168): Chore ScheduledChore name=106923ea030f,45641,1731403942651-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,379 INFO [RS:1;106923ea030f:45641 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,379 INFO [RS:1;106923ea030f:45641 {}] regionserver.Replication(171): 106923ea030f,45641,1731403942651 started 2024-11-12T09:32:23,381 INFO [RS:0;106923ea030f:36723 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T09:32:23,381 INFO [RS:0;106923ea030f:36723 {}] hbase.ChoreService(168): Chore ScheduledChore name=106923ea030f,36723,1731403942608-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,381 INFO [RS:0;106923ea030f:36723 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,381 INFO [RS:0;106923ea030f:36723 {}] regionserver.Replication(171): 106923ea030f,36723,1731403942608 started 2024-11-12T09:32:23,383 INFO [RS:2;106923ea030f:37117 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T09:32:23,383 INFO [RS:2;106923ea030f:37117 {}] hbase.ChoreService(168): Chore ScheduledChore name=106923ea030f,37117,1731403942692-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,383 INFO [RS:2;106923ea030f:37117 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,383 INFO [RS:2;106923ea030f:37117 {}] regionserver.Replication(171): 106923ea030f,37117,1731403942692 started 2024-11-12T09:32:23,394 INFO [RS:0;106923ea030f:36723 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-12T09:32:23,394 INFO [RS:0;106923ea030f:36723 {}] regionserver.HRegionServer(1482): Serving as 106923ea030f,36723,1731403942608, RpcServer on 106923ea030f/172.17.0.2:36723, sessionid=0x1012e6385ae0001 2024-11-12T09:32:23,394 DEBUG [RS:0;106923ea030f:36723 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T09:32:23,394 DEBUG [RS:0;106923ea030f:36723 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 106923ea030f,36723,1731403942608 2024-11-12T09:32:23,394 DEBUG [RS:0;106923ea030f:36723 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '106923ea030f,36723,1731403942608' 2024-11-12T09:32:23,394 DEBUG [RS:0;106923ea030f:36723 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T09:32:23,395 DEBUG [RS:0;106923ea030f:36723 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T09:32:23,395 DEBUG [RS:0;106923ea030f:36723 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T09:32:23,395 DEBUG [RS:0;106923ea030f:36723 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T09:32:23,395 DEBUG [RS:0;106923ea030f:36723 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 106923ea030f,36723,1731403942608 2024-11-12T09:32:23,395 DEBUG [RS:0;106923ea030f:36723 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '106923ea030f,36723,1731403942608' 2024-11-12T09:32:23,396 DEBUG [RS:0;106923ea030f:36723 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T09:32:23,396 DEBUG [RS:0;106923ea030f:36723 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T09:32:23,396 DEBUG [RS:0;106923ea030f:36723 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T09:32:23,397 INFO [RS:0;106923ea030f:36723 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T09:32:23,397 INFO [RS:0;106923ea030f:36723 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-12T09:32:23,397 INFO [RS:1;106923ea030f:45641 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-12T09:32:23,397 INFO [RS:1;106923ea030f:45641 {}] regionserver.HRegionServer(1482): Serving as 106923ea030f,45641,1731403942651, RpcServer on 106923ea030f/172.17.0.2:45641, sessionid=0x1012e6385ae0002 2024-11-12T09:32:23,397 DEBUG [RS:1;106923ea030f:45641 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T09:32:23,397 DEBUG [RS:1;106923ea030f:45641 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 106923ea030f,45641,1731403942651 2024-11-12T09:32:23,397 DEBUG [RS:1;106923ea030f:45641 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '106923ea030f,45641,1731403942651' 2024-11-12T09:32:23,397 DEBUG [RS:1;106923ea030f:45641 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T09:32:23,398 DEBUG [RS:1;106923ea030f:45641 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T09:32:23,398 DEBUG [RS:1;106923ea030f:45641 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T09:32:23,398 DEBUG [RS:1;106923ea030f:45641 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T09:32:23,398 DEBUG [RS:1;106923ea030f:45641 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 106923ea030f,45641,1731403942651 2024-11-12T09:32:23,398 DEBUG [RS:1;106923ea030f:45641 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '106923ea030f,45641,1731403942651' 2024-11-12T09:32:23,398 DEBUG [RS:1;106923ea030f:45641 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T09:32:23,399 DEBUG [RS:1;106923ea030f:45641 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T09:32:23,399 DEBUG [RS:1;106923ea030f:45641 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T09:32:23,399 INFO [RS:1;106923ea030f:45641 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T09:32:23,399 INFO [RS:1;106923ea030f:45641 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-12T09:32:23,400 INFO [RS:2;106923ea030f:37117 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-12T09:32:23,400 INFO [RS:2;106923ea030f:37117 {}] regionserver.HRegionServer(1482): Serving as 106923ea030f,37117,1731403942692, RpcServer on 106923ea030f/172.17.0.2:37117, sessionid=0x1012e6385ae0003 2024-11-12T09:32:23,401 DEBUG [RS:2;106923ea030f:37117 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T09:32:23,401 DEBUG [RS:2;106923ea030f:37117 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 106923ea030f,37117,1731403942692 2024-11-12T09:32:23,401 DEBUG [RS:2;106923ea030f:37117 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '106923ea030f,37117,1731403942692' 2024-11-12T09:32:23,401 DEBUG [RS:2;106923ea030f:37117 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T09:32:23,401 DEBUG [RS:2;106923ea030f:37117 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T09:32:23,402 DEBUG [RS:2;106923ea030f:37117 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T09:32:23,402 DEBUG [RS:2;106923ea030f:37117 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T09:32:23,402 DEBUG [RS:2;106923ea030f:37117 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 106923ea030f,37117,1731403942692 2024-11-12T09:32:23,402 DEBUG [RS:2;106923ea030f:37117 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '106923ea030f,37117,1731403942692' 2024-11-12T09:32:23,402 DEBUG [RS:2;106923ea030f:37117 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T09:32:23,403 DEBUG [RS:2;106923ea030f:37117 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T09:32:23,403 DEBUG [RS:2;106923ea030f:37117 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T09:32:23,404 INFO [RS:2;106923ea030f:37117 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T09:32:23,404 INFO [RS:2;106923ea030f:37117 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-12T09:32:23,405 WARN [106923ea030f:45249 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-12T09:32:23,503 INFO [RS:0;106923ea030f:36723 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=106923ea030f%2C36723%2C1731403942608, suffix=, logDir=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/WALs/106923ea030f,36723,1731403942608, archiveDir=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/oldWALs, maxLogs=32 2024-11-12T09:32:23,505 INFO [RS:1;106923ea030f:45641 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=106923ea030f%2C45641%2C1731403942651, suffix=, logDir=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/WALs/106923ea030f,45641,1731403942651, archiveDir=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/oldWALs, maxLogs=32 2024-11-12T09:32:23,507 INFO [RS:2;106923ea030f:37117 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=106923ea030f%2C37117%2C1731403942692, suffix=, logDir=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/WALs/106923ea030f,37117,1731403942692, archiveDir=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/oldWALs, maxLogs=32 2024-11-12T09:32:23,507 INFO [RS:0;106923ea030f:36723 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 106923ea030f%2C36723%2C1731403942608.1731403943507 2024-11-12T09:32:23,508 INFO [RS:1;106923ea030f:45641 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 106923ea030f%2C45641%2C1731403942651.1731403943508 2024-11-12T09:32:23,511 INFO [RS:2;106923ea030f:37117 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 106923ea030f%2C37117%2C1731403942692.1731403943510 2024-11-12T09:32:23,520 INFO [RS:0;106923ea030f:36723 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/WALs/106923ea030f,36723,1731403942608/106923ea030f%2C36723%2C1731403942608.1731403943507 2024-11-12T09:32:23,520 INFO [RS:1;106923ea030f:45641 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/WALs/106923ea030f,45641,1731403942651/106923ea030f%2C45641%2C1731403942651.1731403943508 2024-11-12T09:32:23,524 INFO [RS:2;106923ea030f:37117 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/WALs/106923ea030f,37117,1731403942692/106923ea030f%2C37117%2C1731403942692.1731403943510 2024-11-12T09:32:23,524 DEBUG [RS:0;106923ea030f:36723 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38157:38157),(127.0.0.1/127.0.0.1:38463:38463),(127.0.0.1/127.0.0.1:46115:46115)] 2024-11-12T09:32:23,524 DEBUG [RS:1;106923ea030f:45641 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46115:46115),(127.0.0.1/127.0.0.1:38463:38463),(127.0.0.1/127.0.0.1:38157:38157)] 2024-11-12T09:32:23,526 DEBUG [RS:2;106923ea030f:37117 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38157:38157),(127.0.0.1/127.0.0.1:38463:38463),(127.0.0.1/127.0.0.1:46115:46115)] 2024-11-12T09:32:23,655 DEBUG [106923ea030f:45249 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-12T09:32:23,656 DEBUG [106923ea030f:45249 {}] balancer.BalancerClusterState(204): Hosts are {106923ea030f=0} racks are {/default-rack=0} 2024-11-12T09:32:23,660 DEBUG [106923ea030f:45249 {}] 
balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-12T09:32:23,661 DEBUG [106923ea030f:45249 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-12T09:32:23,661 DEBUG [106923ea030f:45249 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-12T09:32:23,661 DEBUG [106923ea030f:45249 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-12T09:32:23,661 DEBUG [106923ea030f:45249 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-12T09:32:23,661 DEBUG [106923ea030f:45249 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-12T09:32:23,661 INFO [106923ea030f:45249 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-12T09:32:23,661 INFO [106923ea030f:45249 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-12T09:32:23,661 INFO [106923ea030f:45249 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-12T09:32:23,661 DEBUG [106923ea030f:45249 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-12T09:32:23,662 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=106923ea030f,36723,1731403942608 2024-11-12T09:32:23,665 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 106923ea030f,36723,1731403942608, state=OPENING 2024-11-12T09:32:23,698 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-12T09:32:23,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:23,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37117-0x1012e6385ae0003, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:23,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36723-0x1012e6385ae0001, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:23,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45641-0x1012e6385ae0002, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:23,714 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-12T09:32:23,715 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T09:32:23,715 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T09:32:23,715 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T09:32:23,715 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, 
hasLock=false; OpenRegionProcedure 1588230740, server=106923ea030f,36723,1731403942608}] 2024-11-12T09:32:23,715 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T09:32:23,873 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-12T09:32:23,878 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60195, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-12T09:32:23,887 INFO [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-12T09:32:23,887 INFO [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T09:32:23,891 INFO [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=106923ea030f%2C36723%2C1731403942608.meta, suffix=.meta, logDir=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/WALs/106923ea030f,36723,1731403942608, archiveDir=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/oldWALs, maxLogs=32 2024-11-12T09:32:23,893 INFO [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 106923ea030f%2C36723%2C1731403942608.meta.1731403943893.meta 2024-11-12T09:32:23,901 INFO [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/WALs/106923ea030f,36723,1731403942608/106923ea030f%2C36723%2C1731403942608.meta.1731403943893.meta 2024-11-12T09:32:23,903 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46115:46115),(127.0.0.1/127.0.0.1:38157:38157),(127.0.0.1/127.0.0.1:38463:38463)] 2024-11-12T09:32:23,903 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-12T09:32:23,904 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-12T09:32:23,904 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-12T09:32:23,904 INFO [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-12T09:32:23,904 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-12T09:32:23,904 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T09:32:23,904 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-12T09:32:23,905 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-12T09:32:23,906 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T09:32:23,908 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T09:32:23,908 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:23,908 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T09:32:23,908 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-12T09:32:23,910 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-12T09:32:23,910 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:23,910 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T09:32:23,910 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T09:32:23,911 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T09:32:23,911 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:23,912 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T09:32:23,912 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T09:32:23,913 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-12T09:32:23,913 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:23,914 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-12T09:32:23,914 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T09:32:23,915 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/hbase/meta/1588230740 2024-11-12T09:32:23,917 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/hbase/meta/1588230740 2024-11-12T09:32:23,919 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T09:32:23,919 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T09:32:23,920 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T09:32:23,922 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T09:32:23,923 INFO [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67996232, jitterRate=0.013222813606262207}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T09:32:23,923 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-12T09:32:23,924 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731403943905Writing region info on filesystem at 1731403943905Initializing all the Stores at 1731403943906 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731403943906Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731403943906Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731403943906Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731403943906Cleaning up temporary data from old regions at 1731403943919 (+13 ms)Running coprocessor post-open hooks at 1731403943923 (+4 ms)Region opened successfully at 1731403943924 (+1 ms) 2024-11-12T09:32:23,926 INFO [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731403943872 2024-11-12T09:32:23,930 DEBUG [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-12T09:32:23,930 INFO [RS_OPEN_META-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-12T09:32:23,931 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=106923ea030f,36723,1731403942608 2024-11-12T09:32:23,932 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 106923ea030f,36723,1731403942608, state=OPEN 2024-11-12T09:32:23,943 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45641-0x1012e6385ae0002, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T09:32:23,943 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36723-0x1012e6385ae0001, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T09:32:23,943 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37117-0x1012e6385ae0003, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T09:32:23,943 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T09:32:23,943 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T09:32:23,943 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T09:32:23,943 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T09:32:23,943 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T09:32:23,943 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=106923ea030f,36723,1731403942608 2024-11-12T09:32:23,949 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-12T09:32:23,949 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=106923ea030f,36723,1731403942608 in 228 msec 2024-11-12T09:32:23,953 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-12T09:32:23,953 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 698 msec 2024-11-12T09:32:23,954 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T09:32:23,954 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-12T09:32:23,956 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T09:32:23,956 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=106923ea030f,36723,1731403942608, seqNum=-1] 2024-11-12T09:32:23,956 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T09:32:23,958 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57071, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T09:32:23,966 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 773 msec 2024-11-12T09:32:23,966 INFO [master/106923ea030f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731403943966, completionTime=-1 2024-11-12T09:32:23,966 INFO [master/106923ea030f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-12T09:32:23,966 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 
2024-11-12T09:32:23,969 INFO [master/106923ea030f:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-12T09:32:23,969 INFO [master/106923ea030f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731404003969 2024-11-12T09:32:23,969 INFO [master/106923ea030f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731404063969 2024-11-12T09:32:23,969 INFO [master/106923ea030f:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-12T09:32:23,969 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-12T09:32:23,970 INFO [master/106923ea030f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=106923ea030f,45249,1731403942426-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,970 INFO [master/106923ea030f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=106923ea030f,45249,1731403942426-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,970 INFO [master/106923ea030f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=106923ea030f,45249,1731403942426-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,970 INFO [master/106923ea030f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-106923ea030f:45249, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,970 INFO [master/106923ea030f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,970 INFO [master/106923ea030f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:23,973 DEBUG [master/106923ea030f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-12T09:32:23,975 INFO [master/106923ea030f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.210sec 2024-11-12T09:32:23,976 INFO [master/106923ea030f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-12T09:32:23,976 INFO [master/106923ea030f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-12T09:32:23,976 INFO [master/106923ea030f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-12T09:32:23,976 INFO [master/106923ea030f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-12T09:32:23,976 INFO [master/106923ea030f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-12T09:32:23,976 INFO [master/106923ea030f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=106923ea030f,45249,1731403942426-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T09:32:23,976 INFO [master/106923ea030f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=106923ea030f,45249,1731403942426-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-12T09:32:23,979 DEBUG [master/106923ea030f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-12T09:32:23,979 INFO [master/106923ea030f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-12T09:32:23,979 INFO [master/106923ea030f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=106923ea030f,45249,1731403942426-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T09:32:24,027 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@376b00bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T09:32:24,027 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 106923ea030f,45249,-1 for getting cluster id 2024-11-12T09:32:24,027 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-12T09:32:24,029 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f0638e33-2dcf-4900-a0ac-d62cc2d64b55' 2024-11-12T09:32:24,029 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-12T09:32:24,029 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f0638e33-2dcf-4900-a0ac-d62cc2d64b55" 2024-11-12T09:32:24,029 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35d99438, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T09:32:24,029 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [106923ea030f,45249,-1] 2024-11-12T09:32:24,030 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-12T09:32:24,030 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T09:32:24,031 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60356, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-12T09:32:24,032 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a6e0fe9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T09:32:24,033 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T09:32:24,034 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=106923ea030f,36723,1731403942608, seqNum=-1] 2024-11-12T09:32:24,035 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T09:32:24,036 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55558, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T09:32:24,039 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=106923ea030f,45249,1731403942426 2024-11-12T09:32:24,040 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-12T09:32:24,041 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is 106923ea030f,45249,1731403942426 2024-11-12T09:32:24,041 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7042f4c2 2024-11-12T09:32:24,041 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-12T09:32:24,043 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60370, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-12T09:32:24,044 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45249 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T09:32:24,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45249 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-12T09:32:24,049 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-12T09:32:24,049 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:24,049 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45249 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-12T09:32:24,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45249 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T09:32:24,051 INFO [PEWorker-3 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-12T09:32:24,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43253 is added to blk_1073741837_1013 (size=392) 2024-11-12T09:32:24,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741837_1013 (size=392) 2024-11-12T09:32:24,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36297 is added to blk_1073741837_1013 (size=392) 2024-11-12T09:32:24,064 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 377cad56d0197addfbd7eb4daf38d6b3, NAME => 'TestHBaseWalOnEC,,1731403944044.377cad56d0197addfbd7eb4daf38d6b3.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f 2024-11-12T09:32:24,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36297 is added to blk_1073741838_1014 (size=51) 2024-11-12T09:32:24,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741838_1014 (size=51) 2024-11-12T09:32:24,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43253 is added to blk_1073741838_1014 (size=51) 2024-11-12T09:32:24,074 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731403944044.377cad56d0197addfbd7eb4daf38d6b3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T09:32:24,074 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 377cad56d0197addfbd7eb4daf38d6b3, disabling compactions & flushes 2024-11-12T09:32:24,074 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731403944044.377cad56d0197addfbd7eb4daf38d6b3. 2024-11-12T09:32:24,074 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731403944044.377cad56d0197addfbd7eb4daf38d6b3. 2024-11-12T09:32:24,074 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731403944044.377cad56d0197addfbd7eb4daf38d6b3. after waiting 0 ms 2024-11-12T09:32:24,074 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731403944044.377cad56d0197addfbd7eb4daf38d6b3. 2024-11-12T09:32:24,074 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731403944044.377cad56d0197addfbd7eb4daf38d6b3. 
2024-11-12T09:32:24,074 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 377cad56d0197addfbd7eb4daf38d6b3: Waiting for close lock at 1731403944074Disabling compacts and flushes for region at 1731403944074Disabling writes for close at 1731403944074Writing region close event to WAL at 1731403944074Closed at 1731403944074 2024-11-12T09:32:24,076 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-12T09:32:24,077 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731403944044.377cad56d0197addfbd7eb4daf38d6b3.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731403944076"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731403944076"}]},"ts":"1731403944076"} 2024-11-12T09:32:24,080 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-12T09:32:24,082 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-12T09:32:24,082 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731403944082"}]},"ts":"1731403944082"} 2024-11-12T09:32:24,085 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-12T09:32:24,086 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {106923ea030f=0} racks are {/default-rack=0} 2024-11-12T09:32:24,087 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-12T09:32:24,087 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-12T09:32:24,087 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-12T09:32:24,087 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-12T09:32:24,087 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-12T09:32:24,087 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-12T09:32:24,087 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-12T09:32:24,087 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-12T09:32:24,087 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-12T09:32:24,087 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-12T09:32:24,087 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=377cad56d0197addfbd7eb4daf38d6b3, ASSIGN}] 2024-11-12T09:32:24,090 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=377cad56d0197addfbd7eb4daf38d6b3, ASSIGN 2024-11-12T09:32:24,091 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=377cad56d0197addfbd7eb4daf38d6b3, ASSIGN; state=OFFLINE, location=106923ea030f,37117,1731403942692; forceNewPlan=false, retain=false 2024-11-12T09:32:24,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45249 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T09:32:24,242 INFO [106923ea030f:45249 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-12T09:32:24,242 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=377cad56d0197addfbd7eb4daf38d6b3, regionState=OPENING, regionLocation=106923ea030f,37117,1731403942692 2024-11-12T09:32:24,247 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=377cad56d0197addfbd7eb4daf38d6b3, ASSIGN because future has completed 2024-11-12T09:32:24,248 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 377cad56d0197addfbd7eb4daf38d6b3, server=106923ea030f,37117,1731403942692}] 2024-11-12T09:32:24,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45249 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T09:32:24,405 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-12T09:32:24,409 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40205, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-12T09:32:24,418 INFO [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731403944044.377cad56d0197addfbd7eb4daf38d6b3. 
2024-11-12T09:32:24,419 DEBUG [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 377cad56d0197addfbd7eb4daf38d6b3, NAME => 'TestHBaseWalOnEC,,1731403944044.377cad56d0197addfbd7eb4daf38d6b3.', STARTKEY => '', ENDKEY => ''} 2024-11-12T09:32:24,419 DEBUG [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 377cad56d0197addfbd7eb4daf38d6b3 2024-11-12T09:32:24,420 DEBUG [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731403944044.377cad56d0197addfbd7eb4daf38d6b3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T09:32:24,420 DEBUG [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 377cad56d0197addfbd7eb4daf38d6b3 2024-11-12T09:32:24,420 DEBUG [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 377cad56d0197addfbd7eb4daf38d6b3 2024-11-12T09:32:24,422 INFO [StoreOpener-377cad56d0197addfbd7eb4daf38d6b3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 377cad56d0197addfbd7eb4daf38d6b3 2024-11-12T09:32:24,423 INFO [StoreOpener-377cad56d0197addfbd7eb4daf38d6b3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 377cad56d0197addfbd7eb4daf38d6b3 columnFamilyName cf 2024-11-12T09:32:24,423 DEBUG [StoreOpener-377cad56d0197addfbd7eb4daf38d6b3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T09:32:24,424 INFO [StoreOpener-377cad56d0197addfbd7eb4daf38d6b3-1 {}] regionserver.HStore(327): Store=377cad56d0197addfbd7eb4daf38d6b3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T09:32:24,424 DEBUG [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 377cad56d0197addfbd7eb4daf38d6b3 2024-11-12T09:32:24,425 DEBUG [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/default/TestHBaseWalOnEC/377cad56d0197addfbd7eb4daf38d6b3 2024-11-12T09:32:24,426 DEBUG 
[RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/default/TestHBaseWalOnEC/377cad56d0197addfbd7eb4daf38d6b3 2024-11-12T09:32:24,426 DEBUG [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 377cad56d0197addfbd7eb4daf38d6b3 2024-11-12T09:32:24,426 DEBUG [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 377cad56d0197addfbd7eb4daf38d6b3 2024-11-12T09:32:24,428 DEBUG [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 377cad56d0197addfbd7eb4daf38d6b3 2024-11-12T09:32:24,431 DEBUG [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/default/TestHBaseWalOnEC/377cad56d0197addfbd7eb4daf38d6b3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T09:32:24,431 INFO [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 377cad56d0197addfbd7eb4daf38d6b3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62080976, jitterRate=-0.0749213695526123}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-12T09:32:24,431 DEBUG [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 377cad56d0197addfbd7eb4daf38d6b3 2024-11-12T09:32:24,432 DEBUG [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 377cad56d0197addfbd7eb4daf38d6b3: Running coprocessor pre-open hook at 1731403944420Writing region info on filesystem at 1731403944420Initializing all the Stores at 1731403944421 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731403944421Cleaning up temporary data from old regions at 1731403944426 (+5 ms)Running coprocessor post-open hooks at 1731403944432 (+6 ms)Region opened successfully at 1731403944432 2024-11-12T09:32:24,434 INFO [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731403944044.377cad56d0197addfbd7eb4daf38d6b3., pid=6, masterSystemTime=1731403944404 2024-11-12T09:32:24,437 DEBUG [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731403944044.377cad56d0197addfbd7eb4daf38d6b3. 2024-11-12T09:32:24,437 INFO [RS_OPEN_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731403944044.377cad56d0197addfbd7eb4daf38d6b3. 
2024-11-12T09:32:24,438 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=377cad56d0197addfbd7eb4daf38d6b3, regionState=OPEN, openSeqNum=2, regionLocation=106923ea030f,37117,1731403942692 2024-11-12T09:32:24,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 377cad56d0197addfbd7eb4daf38d6b3, server=106923ea030f,37117,1731403942692 because future has completed 2024-11-12T09:32:24,448 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-12T09:32:24,448 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 377cad56d0197addfbd7eb4daf38d6b3, server=106923ea030f,37117,1731403942692 in 197 msec 2024-11-12T09:32:24,452 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-12T09:32:24,452 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=377cad56d0197addfbd7eb4daf38d6b3, ASSIGN in 361 msec 2024-11-12T09:32:24,453 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-12T09:32:24,454 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731403944453"}]},"ts":"1731403944453"} 2024-11-12T09:32:24,457 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-12T09:32:24,458 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-12T09:32:24,461 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 414 msec 2024-11-12T09:32:24,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45249 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T09:32:24,677 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-12T09:32:24,678 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-12T09:32:24,678 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. 
Timeout = 60000ms 2024-11-12T09:32:24,678 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-12T09:32:24,678 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-12T09:32:24,680 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-12T09:32:24,680 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-12T09:32:24,681 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-12T09:32:24,681 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-12T09:32:24,682 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-12T09:32:24,682 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestHBaseWalOnEC 2024-11-12T09:32:24,682 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestHBaseWalOnEC Metrics about Tables on a single HBase RegionServer 2024-11-12T09:32:24,682 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-12T09:32:24,682 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
2024-11-12T09:32:24,686 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731403944044.377cad56d0197addfbd7eb4daf38d6b3., hostname=106923ea030f,37117,1731403942692, seqNum=2] 2024-11-12T09:32:24,687 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T09:32:24,689 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43888, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T09:32:24,692 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45249 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-11-12T09:32:24,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45249 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-12T09:32:24,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45249 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T09:32:24,696 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-12T09:32:24,697 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T09:32:24,697 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T09:32:24,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45249 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T09:32:24,852 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37117 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-12T09:32:24,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731403944044.377cad56d0197addfbd7eb4daf38d6b3. 
2024-11-12T09:32:24,853 INFO [RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 377cad56d0197addfbd7eb4daf38d6b3 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-12T09:32:24,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/default/TestHBaseWalOnEC/377cad56d0197addfbd7eb4daf38d6b3/.tmp/cf/da191cb2f516440b976a7a5aeb7e503d is 36, key is row/cf:cq/1731403944689/Put/seqid=0 2024-11-12T09:32:24,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43253 is added to blk_1073741839_1015 (size=4787) 2024-11-12T09:32:24,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36297 is added to blk_1073741839_1015 (size=4787) 2024-11-12T09:32:24,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741839_1015 (size=4787) 2024-11-12T09:32:24,880 INFO [RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/default/TestHBaseWalOnEC/377cad56d0197addfbd7eb4daf38d6b3/.tmp/cf/da191cb2f516440b976a7a5aeb7e503d 2024-11-12T09:32:24,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/default/TestHBaseWalOnEC/377cad56d0197addfbd7eb4daf38d6b3/.tmp/cf/da191cb2f516440b976a7a5aeb7e503d as hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/default/TestHBaseWalOnEC/377cad56d0197addfbd7eb4daf38d6b3/cf/da191cb2f516440b976a7a5aeb7e503d 2024-11-12T09:32:24,898 INFO [RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/default/TestHBaseWalOnEC/377cad56d0197addfbd7eb4daf38d6b3/cf/da191cb2f516440b976a7a5aeb7e503d, entries=1, sequenceid=5, filesize=4.7 K 2024-11-12T09:32:24,900 INFO [RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 377cad56d0197addfbd7eb4daf38d6b3 in 47ms, sequenceid=5, compaction requested=false 2024-11-12T09:32:24,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 377cad56d0197addfbd7eb4daf38d6b3: 2024-11-12T09:32:24,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731403944044.377cad56d0197addfbd7eb4daf38d6b3. 
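The procedures from pid=4 through pid=8 above are the server-side view of three client calls: create the table, write one cell, flush it to an HFile. The TestHBaseWalOnEC source is not included in this log, so the following is only a sketch of the client-side calls that would produce those entries; the value bytes are a placeholder, since the log shows only the key row/cf:cq and a 32 B cell.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WalOnEcClientSketch {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("TestHBaseWalOnEC");
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          // CreateTableProcedure pid=4: single region, one column family 'cf'
          admin.createTable(TableDescriptorBuilder.newBuilder(tn)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
              .build());
          // The edit the flush later finds in the memstore (row='row', cf:cq)
          try (Table table = conn.getTable(tn)) {
            table.put(new Put(Bytes.toBytes("row"))
                .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
          }
          // FlushTableProcedure pid=7/8: pushes the memstore out to the store
          // file (da191cb2f516440b976a7a5aeb7e503d in the entries above)
          admin.flush(tn);
        }
      }
    }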
2024-11-12T09:32:24,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/106923ea030f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-12T09:32:24,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45249 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-12T09:32:24,907 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-12T09:32:24,907 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 206 msec 2024-11-12T09:32:24,911 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 216 msec 2024-11-12T09:32:25,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45249 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T09:32:25,017 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-12T09:32:25,021 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-12T09:32:25,021 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-12T09:32:25,021 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T09:32:25,021 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T09:32:25,021 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T09:32:25,022 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-12T09:32:25,022 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-12T09:32:25,022 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=44033319, stopped=false 2024-11-12T09:32:25,022 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=106923ea030f,45249,1731403942426 2024-11-12T09:32:25,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36723-0x1012e6385ae0001, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T09:32:25,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T09:32:25,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37117-0x1012e6385ae0003, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T09:32:25,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45641-0x1012e6385ae0002, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T09:32:25,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37117-0x1012e6385ae0003, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:25,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45249-0x1012e6385ae0000, 
quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:25,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45641-0x1012e6385ae0002, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:25,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36723-0x1012e6385ae0001, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:25,088 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T09:32:25,088 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-12T09:32:25,089 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) 
at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T09:32:25,089 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45641-0x1012e6385ae0002, quorum=127.0.0.1:49717, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T09:32:25,089 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T09:32:25,089 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T09:32:25,089 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37117-0x1012e6385ae0003, quorum=127.0.0.1:49717, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T09:32:25,089 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36723-0x1012e6385ae0001, quorum=127.0.0.1:49717, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T09:32:25,089 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '106923ea030f,36723,1731403942608' ***** 2024-11-12T09:32:25,089 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T09:32:25,089 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '106923ea030f,45641,1731403942651' ***** 2024-11-12T09:32:25,089 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T09:32:25,089 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '106923ea030f,37117,1731403942692' ***** 2024-11-12T09:32:25,089 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T09:32:25,089 INFO [RS:0;106923ea030f:36723 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T09:32:25,089 INFO [RS:1;106923ea030f:45641 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T09:32:25,089 INFO [RS:0;106923ea030f:36723 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T09:32:25,089 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T09:32:25,089 INFO [RS:0;106923ea030f:36723 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-12T09:32:25,090 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T09:32:25,090 INFO [RS:0;106923ea030f:36723 {}] regionserver.HRegionServer(959): stopping server 106923ea030f,36723,1731403942608 2024-11-12T09:32:25,090 INFO [RS:0;106923ea030f:36723 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T09:32:25,090 INFO [RS:0;106923ea030f:36723 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;106923ea030f:36723. 2024-11-12T09:32:25,090 INFO [RS:2;106923ea030f:37117 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T09:32:25,090 DEBUG [RS:0;106923ea030f:36723 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T09:32:25,090 INFO [RS:1;106923ea030f:45641 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T09:32:25,090 DEBUG [RS:0;106923ea030f:36723 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T09:32:25,090 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T09:32:25,090 INFO [RS:1;106923ea030f:45641 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-12T09:32:25,090 INFO [RS:1;106923ea030f:45641 {}] regionserver.HRegionServer(959): stopping server 106923ea030f,45641,1731403942651 2024-11-12T09:32:25,090 INFO [RS:0;106923ea030f:36723 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T09:32:25,090 INFO [RS:1;106923ea030f:45641 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T09:32:25,090 INFO [RS:0;106923ea030f:36723 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T09:32:25,090 INFO [RS:0;106923ea030f:36723 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-12T09:32:25,090 INFO [RS:1;106923ea030f:45641 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;106923ea030f:45641. 
2024-11-12T09:32:25,090 INFO [RS:0;106923ea030f:36723 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-12T09:32:25,090 DEBUG [RS:1;106923ea030f:45641 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T09:32:25,090 DEBUG [RS:1;106923ea030f:45641 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T09:32:25,090 INFO [RS:2;106923ea030f:37117 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T09:32:25,090 INFO [RS:1;106923ea030f:45641 {}] regionserver.HRegionServer(976): stopping server 106923ea030f,45641,1731403942651; all regions closed. 2024-11-12T09:32:25,090 INFO [RS:2;106923ea030f:37117 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-12T09:32:25,091 INFO [RS:2;106923ea030f:37117 {}] regionserver.HRegionServer(3091): Received CLOSE for 377cad56d0197addfbd7eb4daf38d6b3 2024-11-12T09:32:25,091 INFO [RS:0;106923ea030f:36723 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-12T09:32:25,091 DEBUG [RS:0;106923ea030f:36723 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-12T09:32:25,091 DEBUG [RS:0;106923ea030f:36723 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-12T09:32:25,091 DEBUG [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-12T09:32:25,091 INFO [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-12T09:32:25,091 INFO [RS:2;106923ea030f:37117 {}] regionserver.HRegionServer(959): stopping server 106923ea030f,37117,1731403942692 2024-11-12T09:32:25,091 DEBUG [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-12T09:32:25,091 INFO [RS:2;106923ea030f:37117 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T09:32:25,091 DEBUG [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T09:32:25,091 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T09:32:25,091 DEBUG [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T09:32:25,091 INFO [RS:2;106923ea030f:37117 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;106923ea030f:37117. 
2024-11-12T09:32:25,091 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T09:32:25,091 DEBUG [RS:2;106923ea030f:37117 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T09:32:25,091 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T09:32:25,091 DEBUG [RS:2;106923ea030f:37117 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T09:32:25,091 INFO [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-12T09:32:25,091 DEBUG [RS_CLOSE_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 377cad56d0197addfbd7eb4daf38d6b3, disabling compactions & flushes 2024-11-12T09:32:25,091 INFO [RS_CLOSE_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731403944044.377cad56d0197addfbd7eb4daf38d6b3. 2024-11-12T09:32:25,091 INFO [RS:2;106923ea030f:37117 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-12T09:32:25,091 DEBUG [RS_CLOSE_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731403944044.377cad56d0197addfbd7eb4daf38d6b3. 2024-11-12T09:32:25,091 DEBUG [RS:2;106923ea030f:37117 {}] regionserver.HRegionServer(1325): Online Regions={377cad56d0197addfbd7eb4daf38d6b3=TestHBaseWalOnEC,,1731403944044.377cad56d0197addfbd7eb4daf38d6b3.} 2024-11-12T09:32:25,092 DEBUG [RS_CLOSE_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731403944044.377cad56d0197addfbd7eb4daf38d6b3. after waiting 0 ms 2024-11-12T09:32:25,092 DEBUG [RS:2;106923ea030f:37117 {}] regionserver.HRegionServer(1351): Waiting on 377cad56d0197addfbd7eb4daf38d6b3 2024-11-12T09:32:25,092 DEBUG [RS_CLOSE_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731403944044.377cad56d0197addfbd7eb4daf38d6b3. 
2024-11-12T09:32:25,092 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T09:32:25,092 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T09:32:25,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36297 is added to blk_1073741834_1010 (size=93) 2024-11-12T09:32:25,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43253 is added to blk_1073741834_1010 (size=93) 2024-11-12T09:32:25,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741834_1010 (size=93) 2024-11-12T09:32:25,101 DEBUG [RS_CLOSE_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/default/TestHBaseWalOnEC/377cad56d0197addfbd7eb4daf38d6b3/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-12T09:32:25,101 DEBUG [RS:1;106923ea030f:45641 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/oldWALs 2024-11-12T09:32:25,101 INFO [RS:1;106923ea030f:45641 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 106923ea030f%2C45641%2C1731403942651:(num 1731403943508) 2024-11-12T09:32:25,101 DEBUG [RS:1;106923ea030f:45641 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T09:32:25,101 INFO [RS:1;106923ea030f:45641 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T09:32:25,101 INFO [RS:1;106923ea030f:45641 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T09:32:25,102 INFO [RS:1;106923ea030f:45641 {}] hbase.ChoreService(370): Chore service for: regionserver/106923ea030f:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-12T09:32:25,102 INFO [RS_CLOSE_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731403944044.377cad56d0197addfbd7eb4daf38d6b3. 2024-11-12T09:32:25,102 INFO [RS:1;106923ea030f:45641 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T09:32:25,102 INFO [regionserver/106923ea030f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-12T09:32:25,102 INFO [RS:1;106923ea030f:45641 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T09:32:25,102 DEBUG [RS_CLOSE_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 377cad56d0197addfbd7eb4daf38d6b3: Waiting for close lock at 1731403945091Running coprocessor pre-close hooks at 1731403945091Disabling compacts and flushes for region at 1731403945091Disabling writes for close at 1731403945092 (+1 ms)Writing region close event to WAL at 1731403945092Running coprocessor post-close hooks at 1731403945101 (+9 ms)Closed at 1731403945102 (+1 ms) 2024-11-12T09:32:25,102 INFO [RS:1;106923ea030f:45641 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-12T09:32:25,102 INFO [RS:1;106923ea030f:45641 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T09:32:25,102 DEBUG [RS_CLOSE_REGION-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731403944044.377cad56d0197addfbd7eb4daf38d6b3. 2024-11-12T09:32:25,102 INFO [RS:1;106923ea030f:45641 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45641 2024-11-12T09:32:25,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45641-0x1012e6385ae0002, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/106923ea030f,45641,1731403942651 2024-11-12T09:32:25,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T09:32:25,111 INFO [RS:1;106923ea030f:45641 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T09:32:25,115 DEBUG [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/hbase/meta/1588230740/.tmp/info/a70648aa8e8b47e89bba2a279ffc1ef4 is 153, key is TestHBaseWalOnEC,,1731403944044.377cad56d0197addfbd7eb4daf38d6b3./info:regioninfo/1731403944438/Put/seqid=0 2024-11-12T09:32:25,116 WARN [IPC Server handler 1 on default port 35775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-12T09:32:25,116 WARN [IPC Server handler 1 on default port 35775 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-12T09:32:25,116 WARN [IPC Server handler 1 on default port 35775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-12T09:32:25,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741840_1016 (size=6637) 2024-11-12T09:32:25,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36297 is added to blk_1073741840_1016 (size=6637) 2024-11-12T09:32:25,122 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [106923ea030f,45641,1731403942651] 2024-11-12T09:32:25,122 INFO [RS_CLOSE_META-regionserver/106923ea030f:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/hbase/meta/1588230740/.tmp/info/a70648aa8e8b47e89bba2a279ffc1ef4 2024-11-12T09:32:25,132 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/106923ea030f,45641,1731403942651 already deleted, retry=false 2024-11-12T09:32:25,132 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 106923ea030f,45641,1731403942651 expired; onlineServers=2 2024-11-12T09:32:25,147 DEBUG [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/hbase/meta/1588230740/.tmp/ns/8337cbec482d4a63bfac44aa2baa02f4 is 43, key is default/ns:d/1731403943958/Put/seqid=0 2024-11-12T09:32:25,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36297 is added to blk_1073741841_1017 (size=5153) 2024-11-12T09:32:25,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741841_1017 (size=5153) 2024-11-12T09:32:25,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43253 is added to blk_1073741841_1017 (size=5153) 2024-11-12T09:32:25,155 INFO [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/hbase/meta/1588230740/.tmp/ns/8337cbec482d4a63bfac44aa2baa02f4 2024-11-12T09:32:25,162 INFO [regionserver/106923ea030f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T09:32:25,166 INFO [regionserver/106923ea030f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T09:32:25,170 INFO [regionserver/106923ea030f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T09:32:25,179 DEBUG [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/hbase/meta/1588230740/.tmp/table/e5ad96c1f86d4ba3af96fb5aff631751 is 52, key is TestHBaseWalOnEC/table:state/1731403944453/Put/seqid=0 2024-11-12T09:32:25,180 WARN [IPC Server handler 0 on default port 35775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-12T09:32:25,180 WARN [IPC Server handler 0 on default port 35775 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-12T09:32:25,181 
WARN [IPC Server handler 0 on default port 35775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-12T09:32:25,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36297 is added to blk_1073741842_1018 (size=5249) 2024-11-12T09:32:25,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741842_1018 (size=5249) 2024-11-12T09:32:25,186 INFO [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/hbase/meta/1588230740/.tmp/table/e5ad96c1f86d4ba3af96fb5aff631751 2024-11-12T09:32:25,195 DEBUG [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/hbase/meta/1588230740/.tmp/info/a70648aa8e8b47e89bba2a279ffc1ef4 as hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/hbase/meta/1588230740/info/a70648aa8e8b47e89bba2a279ffc1ef4 2024-11-12T09:32:25,203 INFO [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/hbase/meta/1588230740/info/a70648aa8e8b47e89bba2a279ffc1ef4, entries=10, sequenceid=11, filesize=6.5 K 2024-11-12T09:32:25,205 DEBUG [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/hbase/meta/1588230740/.tmp/ns/8337cbec482d4a63bfac44aa2baa02f4 as hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/hbase/meta/1588230740/ns/8337cbec482d4a63bfac44aa2baa02f4 2024-11-12T09:32:25,212 INFO [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/hbase/meta/1588230740/ns/8337cbec482d4a63bfac44aa2baa02f4, entries=2, sequenceid=11, filesize=5.0 K 2024-11-12T09:32:25,214 DEBUG [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/hbase/meta/1588230740/.tmp/table/e5ad96c1f86d4ba3af96fb5aff631751 as hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/hbase/meta/1588230740/table/e5ad96c1f86d4ba3af96fb5aff631751 2024-11-12T09:32:25,221 INFO [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/hbase/meta/1588230740/table/e5ad96c1f86d4ba3af96fb5aff631751, entries=2, sequenceid=11, filesize=5.1 K 2024-11-12T09:32:25,222 INFO [RS:1;106923ea030f:45641 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T09:32:25,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45641-0x1012e6385ae0002, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T09:32:25,222 INFO [RS:1;106923ea030f:45641 {}] regionserver.HRegionServer(1031): Exiting; stopping=106923ea030f,45641,1731403942651; zookeeper connection closed. 2024-11-12T09:32:25,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45641-0x1012e6385ae0002, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T09:32:25,222 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@35ef6ae {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@35ef6ae 2024-11-12T09:32:25,223 INFO [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 132ms, sequenceid=11, compaction requested=false 2024-11-12T09:32:25,229 DEBUG [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-12T09:32:25,230 DEBUG [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-12T09:32:25,230 INFO [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-12T09:32:25,230 DEBUG [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731403945091Running coprocessor pre-close hooks at 1731403945091Disabling compacts and flushes for region at 1731403945091Disabling writes for close at 1731403945091Obtaining lock to block concurrent updates at 1731403945091Preparing flush snapshotting stores in 1588230740 at 1731403945091Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731403945092 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731403945093 (+1 ms)Flushing 1588230740/info: creating writer at 1731403945093Flushing 1588230740/info: appending metadata at 1731403945114 (+21 ms)Flushing 1588230740/info: closing flushed file at 1731403945114Flushing 1588230740/ns: creating writer at 1731403945130 (+16 ms)Flushing 1588230740/ns: appending metadata at 1731403945146 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731403945147 (+1 ms)Flushing 1588230740/table: creating writer at 1731403945164 (+17 ms)Flushing 1588230740/table: appending metadata at 1731403945178 (+14 ms)Flushing 1588230740/table: closing flushed file at 1731403945179 (+1 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b38fa48: reopening flushed file at 1731403945193 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@302b5eab: reopening flushed file at 1731403945203 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@19e5d13: reopening flushed file at 1731403945213 (+10 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 132ms, sequenceid=11, compaction requested=false at 1731403945223 (+10 ms)Writing region close event to WAL at 1731403945224 (+1 ms)Running coprocessor post-close hooks at 1731403945230 (+6 ms)Closed at 1731403945230 2024-11-12T09:32:25,230 DEBUG [RS_CLOSE_META-regionserver/106923ea030f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-12T09:32:25,291 INFO [RS:0;106923ea030f:36723 {}] regionserver.HRegionServer(976): stopping server 106923ea030f,36723,1731403942608; all regions closed. 2024-11-12T09:32:25,292 INFO [RS:2;106923ea030f:37117 {}] regionserver.HRegionServer(976): stopping server 106923ea030f,37117,1731403942692; all regions closed. 2024-11-12T09:32:25,292 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T09:32:25,292 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T09:32:25,292 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T09:32:25,293 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T09:32:25,293 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T09:32:25,293 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T09:32:25,293 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T09:32:25,293 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T09:32:25,294 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T09:32:25,294 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T09:32:25,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43253 is added to blk_1073741836_1012 (size=2751) 2024-11-12T09:32:25,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741835_1011 (size=1298) 2024-11-12T09:32:25,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36297 is added to blk_1073741836_1012 (size=2751) 2024-11-12T09:32:25,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36297 is added to blk_1073741835_1011 (size=1298) 2024-11-12T09:32:25,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741836_1012 (size=2751) 2024-11-12T09:32:25,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43253 is added to blk_1073741835_1011 (size=1298) 2024-11-12T09:32:25,304 DEBUG [RS:2;106923ea030f:37117 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/oldWALs 2024-11-12T09:32:25,304 INFO [RS:2;106923ea030f:37117 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 106923ea030f%2C37117%2C1731403942692:(num 1731403943510) 2024-11-12T09:32:25,304 DEBUG [RS:2;106923ea030f:37117 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T09:32:25,304 INFO [RS:2;106923ea030f:37117 {}] 
regionserver.LeaseManager(133): Closed leases 2024-11-12T09:32:25,304 DEBUG [RS:0;106923ea030f:36723 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/oldWALs 2024-11-12T09:32:25,304 INFO [RS:0;106923ea030f:36723 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 106923ea030f%2C36723%2C1731403942608.meta:.meta(num 1731403943893) 2024-11-12T09:32:25,304 INFO [RS:2;106923ea030f:37117 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T09:32:25,304 INFO [RS:2;106923ea030f:37117 {}] hbase.ChoreService(370): Chore service for: regionserver/106923ea030f:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-12T09:32:25,304 INFO [RS:2;106923ea030f:37117 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T09:32:25,304 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T09:32:25,304 INFO [RS:2;106923ea030f:37117 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T09:32:25,304 INFO [RS:2;106923ea030f:37117 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-12T09:32:25,304 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T09:32:25,305 INFO [RS:2;106923ea030f:37117 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T09:32:25,305 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T09:32:25,305 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T09:32:25,305 INFO [RS:2;106923ea030f:37117 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37117 2024-11-12T09:32:25,305 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T09:32:25,305 INFO [regionserver/106923ea030f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-12T09:32:25,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741833_1009 (size=93) 2024-11-12T09:32:25,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43253 is added to blk_1073741833_1009 (size=93) 2024-11-12T09:32:25,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36297 is added to blk_1073741833_1009 (size=93) 2024-11-12T09:32:25,310 DEBUG [RS:0;106923ea030f:36723 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/oldWALs 2024-11-12T09:32:25,311 INFO [RS:0;106923ea030f:36723 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 106923ea030f%2C36723%2C1731403942608:(num 1731403943507) 2024-11-12T09:32:25,311 DEBUG [RS:0;106923ea030f:36723 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T09:32:25,311 INFO [RS:0;106923ea030f:36723 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T09:32:25,311 INFO [RS:0;106923ea030f:36723 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T09:32:25,311 INFO [RS:0;106923ea030f:36723 {}] hbase.ChoreService(370): Chore service for: regionserver/106923ea030f:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-12T09:32:25,311 INFO [RS:0;106923ea030f:36723 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T09:32:25,311 INFO [regionserver/106923ea030f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-12T09:32:25,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T09:32:25,311 INFO [RS:2;106923ea030f:37117 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T09:32:25,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37117-0x1012e6385ae0003, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/106923ea030f,37117,1731403942692 2024-11-12T09:32:25,311 INFO [RS:0;106923ea030f:36723 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36723 2024-11-12T09:32:25,332 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36723-0x1012e6385ae0001, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/106923ea030f,36723,1731403942608 2024-11-12T09:32:25,332 INFO [RS:0;106923ea030f:36723 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T09:32:25,332 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [106923ea030f,37117,1731403942692] 2024-11-12T09:32:25,343 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/106923ea030f,37117,1731403942692 already deleted, retry=false 2024-11-12T09:32:25,343 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 106923ea030f,37117,1731403942692 expired; onlineServers=1 2024-11-12T09:32:25,343 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [106923ea030f,36723,1731403942608] 2024-11-12T09:32:25,353 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/106923ea030f,36723,1731403942608 already deleted, retry=false 2024-11-12T09:32:25,353 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 106923ea030f,36723,1731403942608 expired; onlineServers=0 2024-11-12T09:32:25,354 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '106923ea030f,45249,1731403942426' ***** 2024-11-12T09:32:25,354 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-12T09:32:25,354 INFO [M:0;106923ea030f:45249 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T09:32:25,354 INFO [M:0;106923ea030f:45249 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T09:32:25,354 DEBUG [M:0;106923ea030f:45249 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-12T09:32:25,354 DEBUG [M:0;106923ea030f:45249 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-12T09:32:25,354 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-12T09:32:25,354 DEBUG [master/106923ea030f:0:becomeActiveMaster-HFileCleaner.large.0-1731403943201 {}] cleaner.HFileCleaner(306): Exit Thread[master/106923ea030f:0:becomeActiveMaster-HFileCleaner.large.0-1731403943201,5,FailOnTimeoutGroup] 2024-11-12T09:32:25,354 DEBUG [master/106923ea030f:0:becomeActiveMaster-HFileCleaner.small.0-1731403943204 {}] cleaner.HFileCleaner(306): Exit Thread[master/106923ea030f:0:becomeActiveMaster-HFileCleaner.small.0-1731403943204,5,FailOnTimeoutGroup] 2024-11-12T09:32:25,354 INFO [M:0;106923ea030f:45249 {}] hbase.ChoreService(370): Chore service for: master/106923ea030f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-12T09:32:25,355 INFO [M:0;106923ea030f:45249 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T09:32:25,355 DEBUG [M:0;106923ea030f:45249 {}] master.HMaster(1795): Stopping service threads 2024-11-12T09:32:25,355 INFO [M:0;106923ea030f:45249 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-12T09:32:25,355 INFO [M:0;106923ea030f:45249 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T09:32:25,355 INFO [M:0;106923ea030f:45249 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-12T09:32:25,355 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-12T09:32:25,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-12T09:32:25,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T09:32:25,364 DEBUG [M:0;106923ea030f:45249 {}] zookeeper.ZKUtil(347): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-12T09:32:25,364 WARN [M:0;106923ea030f:45249 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-12T09:32:25,365 INFO [M:0;106923ea030f:45249 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/.lastflushedseqids 2024-11-12T09:32:25,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36297 is added to blk_1073741843_1019 (size=127) 2024-11-12T09:32:25,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43253 is added to blk_1073741843_1019 (size=127) 2024-11-12T09:32:25,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741843_1019 (size=127) 2024-11-12T09:32:25,378 INFO [M:0;106923ea030f:45249 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-12T09:32:25,378 INFO [M:0;106923ea030f:45249 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-12T09:32:25,378 DEBUG 
[M:0;106923ea030f:45249 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T09:32:25,378 INFO [M:0;106923ea030f:45249 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T09:32:25,378 DEBUG [M:0;106923ea030f:45249 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T09:32:25,378 DEBUG [M:0;106923ea030f:45249 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-12T09:32:25,378 DEBUG [M:0;106923ea030f:45249 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T09:32:25,379 INFO [M:0;106923ea030f:45249 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-11-12T09:32:25,400 DEBUG [M:0;106923ea030f:45249 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/05e6bf0d89de41c9b9b13287fc0baeea is 82, key is hbase:meta,,1/info:regioninfo/1731403943931/Put/seqid=0 2024-11-12T09:32:25,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43253 is added to blk_1073741844_1020 (size=5672) 2024-11-12T09:32:25,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741844_1020 (size=5672) 2024-11-12T09:32:25,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36297 is added to blk_1073741844_1020 (size=5672) 2024-11-12T09:32:25,408 INFO [M:0;106923ea030f:45249 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/05e6bf0d89de41c9b9b13287fc0baeea 2024-11-12T09:32:25,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37117-0x1012e6385ae0003, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T09:32:25,422 INFO [RS:2;106923ea030f:37117 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T09:32:25,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37117-0x1012e6385ae0003, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T09:32:25,422 INFO [RS:2;106923ea030f:37117 {}] regionserver.HRegionServer(1031): Exiting; stopping=106923ea030f,37117,1731403942692; zookeeper connection closed. 
2024-11-12T09:32:25,422 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@ecc67a9 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@ecc67a9 2024-11-12T09:32:25,432 DEBUG [M:0;106923ea030f:45249 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a42e6462c25943cfa0f296620347b201 is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731403944460/Put/seqid=0 2024-11-12T09:32:25,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36297 is added to blk_1073741845_1021 (size=6440) 2024-11-12T09:32:25,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741845_1021 (size=6440) 2024-11-12T09:32:25,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43253 is added to blk_1073741845_1021 (size=6440) 2024-11-12T09:32:25,440 INFO [M:0;106923ea030f:45249 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.16 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a42e6462c25943cfa0f296620347b201 2024-11-12T09:32:25,443 INFO [RS:0;106923ea030f:36723 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T09:32:25,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36723-0x1012e6385ae0001, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T09:32:25,443 INFO [RS:0;106923ea030f:36723 {}] regionserver.HRegionServer(1031): Exiting; stopping=106923ea030f,36723,1731403942608; zookeeper connection closed. 
2024-11-12T09:32:25,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36723-0x1012e6385ae0001, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T09:32:25,443 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@22bbca {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@22bbca 2024-11-12T09:32:25,444 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-12T09:32:25,462 DEBUG [M:0;106923ea030f:45249 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/56ff6243cc34460a864345a07687fcef is 69, key is 106923ea030f,36723,1731403942608/rs:state/1731403943290/Put/seqid=0 2024-11-12T09:32:25,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43253 is added to blk_1073741846_1022 (size=5294) 2024-11-12T09:32:25,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741846_1022 (size=5294) 2024-11-12T09:32:25,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36297 is added to blk_1073741846_1022 (size=5294) 2024-11-12T09:32:25,470 INFO [M:0;106923ea030f:45249 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/56ff6243cc34460a864345a07687fcef 2024-11-12T09:32:25,477 DEBUG [M:0;106923ea030f:45249 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/05e6bf0d89de41c9b9b13287fc0baeea as hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/05e6bf0d89de41c9b9b13287fc0baeea 2024-11-12T09:32:25,485 INFO [M:0;106923ea030f:45249 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/05e6bf0d89de41c9b9b13287fc0baeea, entries=8, sequenceid=72, filesize=5.5 K 2024-11-12T09:32:25,487 DEBUG [M:0;106923ea030f:45249 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a42e6462c25943cfa0f296620347b201 as hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a42e6462c25943cfa0f296620347b201 2024-11-12T09:32:25,494 INFO [M:0;106923ea030f:45249 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a42e6462c25943cfa0f296620347b201, entries=8, sequenceid=72, filesize=6.3 K 2024-11-12T09:32:25,496 DEBUG [M:0;106923ea030f:45249 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/56ff6243cc34460a864345a07687fcef as hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/56ff6243cc34460a864345a07687fcef 2024-11-12T09:32:25,505 INFO [M:0;106923ea030f:45249 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35775/user/jenkins/test-data/bdbf222d-5f39-e1fb-cea7-95ae3642cc4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/56ff6243cc34460a864345a07687fcef, entries=3, sequenceid=72, filesize=5.2 K 2024-11-12T09:32:25,507 INFO [M:0;106923ea030f:45249 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=72, compaction requested=false 2024-11-12T09:32:25,508 INFO [M:0;106923ea030f:45249 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T09:32:25,508 DEBUG [M:0;106923ea030f:45249 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731403945378Disabling compacts and flushes for region at 1731403945378Disabling writes for close at 1731403945378Obtaining lock to block concurrent updates at 1731403945379 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731403945379Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27483, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1731403945379Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731403945380 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731403945380Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731403945400 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731403945400Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731403945415 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731403945431 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731403945431Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731403945446 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731403945462 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731403945462Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@396e0c41: reopening flushed file at 1731403945476 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1c50cba4: reopening flushed file at 1731403945486 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2e920ba: reopening flushed file at 1731403945495 (+9 ms)Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=72, compaction requested=false at 1731403945507 (+12 ms)Writing region close event to WAL at 1731403945508 (+1 ms)Closed at 1731403945508 2024-11-12T09:32:25,509 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T09:32:25,509 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T09:32:25,509 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T09:32:25,509 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T09:32:25,509 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T09:32:25,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741830_1006 (size=32686) 2024-11-12T09:32:25,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43253 is added to blk_1073741830_1006 (size=32686) 2024-11-12T09:32:25,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36297 is added to blk_1073741830_1006 (size=32686) 2024-11-12T09:32:25,512 INFO [M:0;106923ea030f:45249 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-12T09:32:25,512 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-12T09:32:25,513 INFO [M:0;106923ea030f:45249 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45249 2024-11-12T09:32:25,513 INFO [M:0;106923ea030f:45249 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T09:32:25,643 INFO [M:0;106923ea030f:45249 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T09:32:25,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T09:32:25,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45249-0x1012e6385ae0000, quorum=127.0.0.1:49717, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T09:32:25,647 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@bff0a43{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T09:32:25,647 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@19dff04d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T09:32:25,647 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T09:32:25,648 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@38da8210{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T09:32:25,648 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e0095f0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/hadoop.log.dir/,STOPPED} 2024-11-12T09:32:25,650 WARN [BP-110200569-172.17.0.2-1731403939869 heartbeating to localhost/127.0.0.1:35775 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T09:32:25,650 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-12T09:32:25,650 WARN [BP-110200569-172.17.0.2-1731403939869 heartbeating to localhost/127.0.0.1:35775 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-110200569-172.17.0.2-1731403939869 (Datanode Uuid 6922bd40-6891-44b2-9122-ee9897812a10) service to localhost/127.0.0.1:35775
2024-11-12T09:32:25,650 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-12T09:32:25,651 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/cluster_9183fb4f-07cc-ee27-d96f-59781f280d8b/data/data5/current/BP-110200569-172.17.0.2-1731403939869 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-12T09:32:25,651 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/cluster_9183fb4f-07cc-ee27-d96f-59781f280d8b/data/data6/current/BP-110200569-172.17.0.2-1731403939869 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-12T09:32:25,652 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-12T09:32:25,655 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6e5e4927{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-12T09:32:25,655 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1768a8c1{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-12T09:32:25,655 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-12T09:32:25,655 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@bb1336{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-12T09:32:25,656 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5b4297c4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/hadoop.log.dir/,STOPPED}
2024-11-12T09:32:25,657 WARN [BP-110200569-172.17.0.2-1731403939869 heartbeating to localhost/127.0.0.1:35775 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-12T09:32:25,657 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-12T09:32:25,657 WARN [BP-110200569-172.17.0.2-1731403939869 heartbeating to localhost/127.0.0.1:35775 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-110200569-172.17.0.2-1731403939869 (Datanode Uuid 3baf23ec-a6ab-4dc5-8651-0287cc548d03) service to localhost/127.0.0.1:35775
2024-11-12T09:32:25,657 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-12T09:32:25,658 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/cluster_9183fb4f-07cc-ee27-d96f-59781f280d8b/data/data3/current/BP-110200569-172.17.0.2-1731403939869 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-12T09:32:25,658 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/cluster_9183fb4f-07cc-ee27-d96f-59781f280d8b/data/data4/current/BP-110200569-172.17.0.2-1731403939869 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-12T09:32:25,658 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-12T09:32:25,660 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@38e5384{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-12T09:32:25,660 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7d6118e0{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-12T09:32:25,660 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-12T09:32:25,661 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61a92fea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-12T09:32:25,661 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2807f8c2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/hadoop.log.dir/,STOPPED}
2024-11-12T09:32:25,661 WARN [BP-110200569-172.17.0.2-1731403939869 heartbeating to localhost/127.0.0.1:35775 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-12T09:32:25,661 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-12T09:32:25,662 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-12T09:32:25,662 WARN [BP-110200569-172.17.0.2-1731403939869 heartbeating to localhost/127.0.0.1:35775 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-110200569-172.17.0.2-1731403939869 (Datanode Uuid 6ae5e573-fdff-478f-bc18-c2c087979327) service to localhost/127.0.0.1:35775
2024-11-12T09:32:25,662 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/cluster_9183fb4f-07cc-ee27-d96f-59781f280d8b/data/data1/current/BP-110200569-172.17.0.2-1731403939869 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-12T09:32:25,662 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/cluster_9183fb4f-07cc-ee27-d96f-59781f280d8b/data/data2/current/BP-110200569-172.17.0.2-1731403939869 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-12T09:32:25,663 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-12T09:32:25,667 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7aaeb6cf{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-12T09:32:25,668 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@686c9dd5{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-12T09:32:25,668 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-12T09:32:25,668 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56aa9d3b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-12T09:32:25,668 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a49b909{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/013fc288-a8f7-49cd-9984-1f654c711fb5/hadoop.log.dir/,STOPPED}
2024-11-12T09:32:25,677 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-12T09:32:25,704 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-12T09:32:25,710 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=147 (was 86) - Thread LEAK? -, OpenFileDescriptor=516 (was 443) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=249 (was 236) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7432 (was 7571)