2024-11-12 12:32:00,342 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba
2024-11-12 12:32:00,354 main DEBUG Took 0.010095 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-12 12:32:00,354 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-12 12:32:00,354 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-12 12:32:00,355 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-12 12:32:00,357 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-12 12:32:00,371 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-12 12:32:00,382 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-12 12:32:00,384 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-12 12:32:00,384 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-12 12:32:00,385 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-12 12:32:00,385 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-12 12:32:00,385 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-12 12:32:00,386 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-12 12:32:00,386 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-12 12:32:00,387 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-12 12:32:00,387 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-12 12:32:00,388 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-12 12:32:00,388 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-12 12:32:00,389 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-12 12:32:00,389 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-12 12:32:00,389 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-12 12:32:00,390 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-12 12:32:00,390 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-12 12:32:00,390 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-12 12:32:00,391 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-12 12:32:00,391 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-12 12:32:00,392 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-12 12:32:00,392 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-12 12:32:00,392 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-12 12:32:00,393 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-12 12:32:00,393 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-12 12:32:00,393 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-12 12:32:00,395 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-12 12:32:00,396 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-12 12:32:00,398 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-12 12:32:00,398 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-11-12 12:32:00,399 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-12 12:32:00,400 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-12 12:32:00,407 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-12 12:32:00,409 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-12 12:32:00,411 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-12 12:32:00,411 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-12 12:32:00,412 main DEBUG createAppenders(={Console})
2024-11-12 12:32:00,413 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized
2024-11-12 12:32:00,413 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba
2024-11-12 12:32:00,413 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK.
2024-11-12 12:32:00,414 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-12 12:32:00,414 main DEBUG OutputStream closed
2024-11-12 12:32:00,414 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-12 12:32:00,415 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-12 12:32:00,415 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK
2024-11-12 12:32:00,486 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-12 12:32:00,488 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-12 12:32:00,489 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-12 12:32:00,490 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-12 12:32:00,491 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-12 12:32:00,491 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-12 12:32:00,491 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-12 12:32:00,491 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-12 12:32:00,492 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-12 12:32:00,492 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-12 12:32:00,492 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-12 12:32:00,493 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-12 12:32:00,493 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-12 12:32:00,493 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-12 12:32:00,494 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-12 12:32:00,494 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-12 12:32:00,494 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-12 12:32:00,495 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-12 12:32:00,497 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-12 12:32:00,497 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null
2024-11-12 12:32:00,497 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-12 12:32:00,498 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK.
2024-11-12T12:32:00,511 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins
2024-11-12 12:32:00,513 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-12 12:32:00,513 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
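[Editor's note] The DEBUG lines above show the PropertiesConfiguration assembling one appender (name=Console, target SYSTEM_ERR, pattern "%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n"), a set of per-package logger levels, and a root logger at INFO routed to Console. A minimal, hedged Java sketch of an equivalent programmatic setup using the standard Log4j2 ConfigurationBuilder API follows; it substitutes the stock Console appender for the HBase-specific HBaseTestAppender (an assumption for illustration) and only reproduces a few of the logger levels listed above.

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.appender.ConsoleAppender;
    import org.apache.logging.log4j.core.config.Configurator;
    import org.apache.logging.log4j.core.config.builder.api.AppenderComponentBuilder;
    import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
    import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
    import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;

    public final class TestLoggingSketch {
        public static void main(String[] args) {
            ConfigurationBuilder<BuiltConfiguration> builder =
                ConfigurationBuilderFactory.newConfigurationBuilder();

            // Stand-in for HBaseTestAppender: stock Console appender to SYSTEM_ERR
            // with the same pattern the builder log shows.
            AppenderComponentBuilder console = builder.newAppender("Console", "Console")
                .addAttribute("target", ConsoleAppender.Target.SYSTEM_ERR)
                .add(builder.newLayout("PatternLayout")
                    .addAttribute("pattern", "%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n"));
            builder.add(console);

            // A few of the logger levels registered by the properties file above.
            builder.add(builder.newLogger("org.apache.hadoop", Level.WARN));
            builder.add(builder.newLogger("org.apache.hadoop.hbase", Level.DEBUG));
            builder.add(builder.newLogger("org.apache.zookeeper", Level.ERROR));

            // Root logger at INFO, referencing the Console appender, as in the log.
            builder.add(builder.newRootLogger(Level.INFO)
                .add(builder.newAppenderRef("Console")));

            Configurator.initialize(builder.build());
        }
    }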
2024-11-12T12:32:00,727 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d
2024-11-12T12:32:00,751 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/cluster_af54d12f-3eca-f27a-b969-e30bd9199a8b, deleteOnExit=true
2024-11-12T12:32:00,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/test.cache.data in system properties and HBase conf
2024-11-12T12:32:00,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/hadoop.tmp.dir in system properties and HBase conf
2024-11-12T12:32:00,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/hadoop.log.dir in system properties and HBase conf
2024-11-12T12:32:00,755 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-12T12:32:00,755 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-12T12:32:00,755 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-12T12:32:00,838 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-11-12T12:32:00,921 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-12T12:32:00,924 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-12T12:32:00,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-12T12:32:00,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-12T12:32:00,926 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-12T12:32:00,926 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-12T12:32:00,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-12T12:32:00,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-12T12:32:00,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-12T12:32:00,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-12T12:32:00,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/nfs.dump.dir in system properties and HBase conf
2024-11-12T12:32:00,929 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/java.io.tmpdir in system properties and HBase conf
2024-11-12T12:32:00,929 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-12T12:32:00,930 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-12T12:32:00,930 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-12T12:32:01,958 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-11-12T12:32:02,022 INFO [Time-limited test {}] log.Log(170): Logging initialized @2316ms to org.eclipse.jetty.util.log.Slf4jLog
2024-11-12T12:32:02,084 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-12T12:32:02,139 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-12T12:32:02,159 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-12T12:32:02,159 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-12T12:32:02,160 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-12T12:32:02,172 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-12T12:32:02,175 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/hadoop.log.dir/,AVAILABLE}
2024-11-12T12:32:02,176 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-12T12:32:02,338 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e4c45c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/java.io.tmpdir/jetty-localhost-39301-hadoop-hdfs-3_4_1-tests_jar-_-any-11388807998564806675/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-12T12:32:02,350 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:39301}
2024-11-12T12:32:02,350 INFO [Time-limited test {}] server.Server(415): Started @2645ms
2024-11-12T12:32:02,928 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-12T12:32:02,936 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-12T12:32:02,937 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-12T12:32:02,937 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-12T12:32:02,937 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-12T12:32:02,938 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/hadoop.log.dir/,AVAILABLE}
2024-11-12T12:32:02,939 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-12T12:32:03,037 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4839957b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/java.io.tmpdir/jetty-localhost-34671-hadoop-hdfs-3_4_1-tests_jar-_-any-6521305925095935202/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-12T12:32:03,038 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:34671}
2024-11-12T12:32:03,038 INFO [Time-limited test {}] server.Server(415): Started @3332ms
2024-11-12T12:32:03,085 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-12T12:32:03,193 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-12T12:32:03,197 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-12T12:32:03,199 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-12T12:32:03,199 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-12T12:32:03,200 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-12T12:32:03,200 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/hadoop.log.dir/,AVAILABLE}
2024-11-12T12:32:03,201 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-12T12:32:03,299 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c6b8f01{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/java.io.tmpdir/jetty-localhost-40349-hadoop-hdfs-3_4_1-tests_jar-_-any-14298794277903249502/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-12T12:32:03,299 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:40349}
2024-11-12T12:32:03,299 INFO [Time-limited test {}] server.Server(415): Started @3594ms
2024-11-12T12:32:03,302 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-12T12:32:03,337 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-12T12:32:03,342 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-12T12:32:03,344 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-12T12:32:03,344 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-12T12:32:03,344 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-12T12:32:03,345 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/hadoop.log.dir/,AVAILABLE}
2024-11-12T12:32:03,346 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-12T12:32:03,471 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e59159d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/java.io.tmpdir/jetty-localhost-37541-hadoop-hdfs-3_4_1-tests_jar-_-any-11296050609813098356/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-12T12:32:03,473 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:37541}
2024-11-12T12:32:03,473 INFO [Time-limited test {}] server.Server(415): Started @3767ms
2024-11-12T12:32:03,475 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-12T12:32:04,883 WARN [Thread-121 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/cluster_af54d12f-3eca-f27a-b969-e30bd9199a8b/data/data4/current/BP-1258944945-172.17.0.3-1731414721461/current, will proceed with Du for space computation calculation,
2024-11-12T12:32:04,883 WARN [Thread-120 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/cluster_af54d12f-3eca-f27a-b969-e30bd9199a8b/data/data3/current/BP-1258944945-172.17.0.3-1731414721461/current, will proceed with Du for space computation calculation,
2024-11-12T12:32:04,909 WARN [Thread-130 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/cluster_af54d12f-3eca-f27a-b969-e30bd9199a8b/data/data1/current/BP-1258944945-172.17.0.3-1731414721461/current, will proceed with Du for space computation calculation,
2024-11-12T12:32:04,909 WARN [Thread-131 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/cluster_af54d12f-3eca-f27a-b969-e30bd9199a8b/data/data2/current/BP-1258944945-172.17.0.3-1731414721461/current, will proceed with Du for space computation calculation,
2024-11-12T12:32:04,910 WARN [Thread-132 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/cluster_af54d12f-3eca-f27a-b969-e30bd9199a8b/data/data5/current/BP-1258944945-172.17.0.3-1731414721461/current, will proceed with Du for space computation calculation,
2024-11-12T12:32:04,910 WARN [Thread-133 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/cluster_af54d12f-3eca-f27a-b969-e30bd9199a8b/data/data6/current/BP-1258944945-172.17.0.3-1731414721461/current, will proceed with Du for space computation calculation,
2024-11-12T12:32:04,915 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-12T12:32:04,942 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-12T12:32:04,942 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-12T12:32:04,973 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x402c143f3cc6d74b with lease ID 0x1ad38dd610cf0b7d: Processing first storage report for DS-0e482dcf-b316-4eb2-adb1-98e5f6297d85 from datanode DatanodeRegistration(127.0.0.1:33531, datanodeUuid=10d2f4c6-b2c6-4e36-b701-3a37275b048d, infoPort=39749, infoSecurePort=0, ipcPort=41365, storageInfo=lv=-57;cid=testClusterID;nsid=722649701;c=1731414721461)
2024-11-12T12:32:04,975 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x402c143f3cc6d74b with lease ID 0x1ad38dd610cf0b7d: from storage DS-0e482dcf-b316-4eb2-adb1-98e5f6297d85 node DatanodeRegistration(127.0.0.1:33531, datanodeUuid=10d2f4c6-b2c6-4e36-b701-3a37275b048d, infoPort=39749, infoSecurePort=0, ipcPort=41365, storageInfo=lv=-57;cid=testClusterID;nsid=722649701;c=1731414721461), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-12T12:32:04,975 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd37902ba7521c31e with lease ID 0x1ad38dd610cf0b7e: Processing first storage report for DS-3e50e75d-3f0a-4194-a888-4baa22232be3 from datanode DatanodeRegistration(127.0.0.1:44645, datanodeUuid=9581ae8b-9bbb-4d95-913e-b822260e9dd1, infoPort=42753, infoSecurePort=0, ipcPort=38219, storageInfo=lv=-57;cid=testClusterID;nsid=722649701;c=1731414721461)
2024-11-12T12:32:04,976 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd37902ba7521c31e with lease ID 0x1ad38dd610cf0b7e: from storage DS-3e50e75d-3f0a-4194-a888-4baa22232be3 node DatanodeRegistration(127.0.0.1:44645, datanodeUuid=9581ae8b-9bbb-4d95-913e-b822260e9dd1, infoPort=42753, infoSecurePort=0, ipcPort=38219, storageInfo=lv=-57;cid=testClusterID;nsid=722649701;c=1731414721461), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-12T12:32:04,976 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbe844a75b076084a with lease ID 0x1ad38dd610cf0b7f: Processing first storage report for DS-b7175ac8-0d28-4f92-8602-0d68f80c2763 from datanode DatanodeRegistration(127.0.0.1:41173, datanodeUuid=8b4d2a64-da58-474e-a63c-790ee6a25c03, infoPort=40761, infoSecurePort=0, ipcPort=36383, storageInfo=lv=-57;cid=testClusterID;nsid=722649701;c=1731414721461)
2024-11-12T12:32:04,976 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbe844a75b076084a with lease ID 0x1ad38dd610cf0b7f: from storage DS-b7175ac8-0d28-4f92-8602-0d68f80c2763 node DatanodeRegistration(127.0.0.1:41173, datanodeUuid=8b4d2a64-da58-474e-a63c-790ee6a25c03, infoPort=40761, infoSecurePort=0, ipcPort=36383, storageInfo=lv=-57;cid=testClusterID;nsid=722649701;c=1731414721461), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-12T12:32:04,977 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x402c143f3cc6d74b with lease ID 0x1ad38dd610cf0b7d: Processing first storage report for DS-abf76d61-fa8f-4e89-9595-4d82be8ddc8d from datanode DatanodeRegistration(127.0.0.1:33531, datanodeUuid=10d2f4c6-b2c6-4e36-b701-3a37275b048d, infoPort=39749, infoSecurePort=0, ipcPort=41365, storageInfo=lv=-57;cid=testClusterID;nsid=722649701;c=1731414721461)
2024-11-12T12:32:04,977 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x402c143f3cc6d74b with lease ID 0x1ad38dd610cf0b7d: from storage DS-abf76d61-fa8f-4e89-9595-4d82be8ddc8d node DatanodeRegistration(127.0.0.1:33531, datanodeUuid=10d2f4c6-b2c6-4e36-b701-3a37275b048d, infoPort=39749, infoSecurePort=0, ipcPort=41365, storageInfo=lv=-57;cid=testClusterID;nsid=722649701;c=1731414721461), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-12T12:32:04,977 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd37902ba7521c31e with lease ID 0x1ad38dd610cf0b7e: Processing first storage report for DS-435fa0f2-9660-4afe-b412-828713b61daf from datanode DatanodeRegistration(127.0.0.1:44645, datanodeUuid=9581ae8b-9bbb-4d95-913e-b822260e9dd1, infoPort=42753, infoSecurePort=0, ipcPort=38219, storageInfo=lv=-57;cid=testClusterID;nsid=722649701;c=1731414721461)
2024-11-12T12:32:04,977 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd37902ba7521c31e with lease ID 0x1ad38dd610cf0b7e: from storage DS-435fa0f2-9660-4afe-b412-828713b61daf node DatanodeRegistration(127.0.0.1:44645, datanodeUuid=9581ae8b-9bbb-4d95-913e-b822260e9dd1, infoPort=42753, infoSecurePort=0, ipcPort=38219, storageInfo=lv=-57;cid=testClusterID;nsid=722649701;c=1731414721461), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-12T12:32:04,978 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbe844a75b076084a with lease ID 0x1ad38dd610cf0b7f: Processing first storage report for DS-90bdb6f6-e951-4a86-9b85-f717b039c178 from datanode DatanodeRegistration(127.0.0.1:41173, datanodeUuid=8b4d2a64-da58-474e-a63c-790ee6a25c03, infoPort=40761, infoSecurePort=0, ipcPort=36383, storageInfo=lv=-57;cid=testClusterID;nsid=722649701;c=1731414721461)
2024-11-12T12:32:04,978 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbe844a75b076084a with lease ID 0x1ad38dd610cf0b7f: from storage DS-90bdb6f6-e951-4a86-9b85-f717b039c178 node DatanodeRegistration(127.0.0.1:41173, datanodeUuid=8b4d2a64-da58-474e-a63c-790ee6a25c03, infoPort=40761, infoSecurePort=0, ipcPort=36383, storageInfo=lv=-57;cid=testClusterID;nsid=722649701;c=1731414721461), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-12T12:32:04,982 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d
2024-11-12T12:32:05,051 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... using builtin-java codec where applicable
2024-11-12T12:32:05,109 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=155, OpenFileDescriptor=393, MaxFileDescriptor=1048576, SystemLoadAverage=254, ProcessCount=11, AvailableMemoryMB=7812
2024-11-12T12:32:05,110 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-12T12:32:05,118 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS
2024-11-12T12:32:05,245 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/cluster_af54d12f-3eca-f27a-b969-e30bd9199a8b/zookeeper_0, clientPort=54297, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/cluster_af54d12f-3eca-f27a-b969-e30bd9199a8b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/cluster_af54d12f-3eca-f27a-b969-e30bd9199a8b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-12T12:32:05,255 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54297
2024-11-12T12:32:05,266 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-12T12:32:05,269 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-12T12:32:05,377 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-12T12:32:05,377 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-12T12:32:05,429 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1228856340_22 at /127.0.0.1:40100 [Receiving block BP-1258944945-172.17.0.3-1731414721461:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:33531:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40100 dst: /127.0.0.1:33531
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T12:32:05,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33531 is added to blk_-9223372036854775792_1002 (size=7)
2024-11-12T12:32:05,852 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-12T12:32:05,863 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e with version=8
2024-11-12T12:32:05,863 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/hbase-staging
2024-11-12T12:32:05,943 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-11-12T12:32:06,180 INFO [Time-limited test {}] client.ConnectionUtils(128): master/4a91c05c96a4:0 server-side Connection retries=45
2024-11-12T12:32:06,190 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-12T12:32:06,190 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-12T12:32:06,195 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-12T12:32:06,195 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-12T12:32:06,195 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-12T12:32:06,326 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-11-12T12:32:06,381 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-11-12T12:32:06,390 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-11-12T12:32:06,393 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-12T12:32:06,414 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 96376 (auto-detected)
2024-11-12T12:32:06,415 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected)
2024-11-12T12:32:06,432 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:36145
2024-11-12T12:32:06,450 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36145 connecting to ZooKeeper ensemble=127.0.0.1:54297
2024-11-12T12:32:06,592 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:361450x0, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-12T12:32:06,594 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36145-0x1012f080e170000 connected
2024-11-12T12:32:06,684 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-12T12:32:06,689 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-12T12:32:06,702 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-12T12:32:06,706 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e, hbase.cluster.distributed=false
2024-11-12T12:32:06,730 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-12T12:32:06,734 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36145
2024-11-12T12:32:06,734 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36145
2024-11-12T12:32:06,735 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36145
2024-11-12T12:32:06,735 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36145
2024-11-12T12:32:06,735 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36145
2024-11-12T12:32:06,830 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/4a91c05c96a4:0 server-side Connection retries=45
2024-11-12T12:32:06,832 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-12T12:32:06,832 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-12T12:32:06,832 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-12T12:32:06,832 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-12T12:32:06,833 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-12T12:32:06,835 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-12T12:32:06,838 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-12T12:32:06,838 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:33963
2024-11-12T12:32:06,840 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33963 connecting to ZooKeeper ensemble=127.0.0.1:54297
2024-11-12T12:32:06,841 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-12T12:32:06,846 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-12T12:32:06,861 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:339630x0, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-12T12:32:06,862 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33963-0x1012f080e170001 connected
2024-11-12T12:32:06,862 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33963-0x1012f080e170001, quorum=127.0.0.1:54297, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-12T12:32:06,866 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-12T12:32:06,873 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-12T12:32:06,876 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33963-0x1012f080e170001, quorum=127.0.0.1:54297, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-12T12:32:06,881 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33963-0x1012f080e170001, quorum=127.0.0.1:54297, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-12T12:32:06,882 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33963
2024-11-12T12:32:06,882 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33963
2024-11-12T12:32:06,882 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33963
2024-11-12T12:32:06,883 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33963
2024-11-12T12:32:06,883 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33963
2024-11-12T12:32:06,899 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/4a91c05c96a4:0 server-side Connection retries=45
2024-11-12T12:32:06,899 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-12T12:32:06,900 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-12T12:32:06,900 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-12T12:32:06,900 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-12T12:32:06,900 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-12T12:32:06,900 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-12T12:32:06,901 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-12T12:32:06,902 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:37105
2024-11-12T12:32:06,903 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37105 connecting to ZooKeeper ensemble=127.0.0.1:54297
2024-11-12T12:32:06,904 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-12T12:32:06,907 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-12T12:32:06,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:371050x0, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-12T12:32:06,925 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:371050x0, quorum=127.0.0.1:54297, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-12T12:32:06,925 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37105-0x1012f080e170002 connected
2024-11-12T12:32:06,926 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-12T12:32:06,927 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-12T12:32:06,928 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37105-0x1012f080e170002, quorum=127.0.0.1:54297, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-12T12:32:06,930 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37105-0x1012f080e170002, quorum=127.0.0.1:54297, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-12T12:32:06,930 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37105
2024-11-12T12:32:06,931 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37105
2024-11-12T12:32:06,931 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37105
2024-11-12T12:32:06,932 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37105
2024-11-12T12:32:06,932 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37105
2024-11-12T12:32:06,948 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/4a91c05c96a4:0 server-side Connection retries=45
2024-11-12T12:32:06,948 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-12T12:32:06,948 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-12T12:32:06,948 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-12T12:32:06,949 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-12T12:32:06,949 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-12T12:32:06,949 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-12T12:32:06,949 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-12T12:32:06,952 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:44607
2024-11-12T12:32:06,954 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44607 connecting to ZooKeeper ensemble=127.0.0.1:54297
2024-11-12T12:32:06,956 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-12T12:32:06,958 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-12T12:32:06,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:446070x0, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-12T12:32:06,967 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:446070x0, quorum=127.0.0.1:54297, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-12T12:32:06,967 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44607-0x1012f080e170003 connected
2024-11-12T12:32:06,968 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-12T12:32:06,968 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-12T12:32:06,969 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44607-0x1012f080e170003, quorum=127.0.0.1:54297, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-12T12:32:06,972 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44607-0x1012f080e170003, quorum=127.0.0.1:54297, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-12T12:32:06,972 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44607
2024-11-12T12:32:06,976 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44607
2024-11-12T12:32:06,976 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44607
2024-11-12T12:32:06,977 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44607
2024-11-12T12:32:06,978 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44607
2024-11-12T12:32:06,991 DEBUG [M:0;4a91c05c96a4:36145 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;4a91c05c96a4:36145
2024-11-12T12:32:06,992 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/4a91c05c96a4,36145,1731414726033
2024-11-12T12:32:07,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33963-0x1012f080e170001, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-12T12:32:07,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37105-0x1012f080e170002, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-12T12:32:07,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-12T12:32:07,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x1012f080e170003, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T12:32:07,011 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/4a91c05c96a4,36145,1731414726033 2024-11-12T12:32:07,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x1012f080e170003, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T12:32:07,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37105-0x1012f080e170002, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T12:32:07,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:07,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x1012f080e170003, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:07,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37105-0x1012f080e170002, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:07,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33963-0x1012f080e170001, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T12:32:07,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33963-0x1012f080e170001, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:07,042 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-12T12:32:07,045 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/4a91c05c96a4,36145,1731414726033 from backup master directory 2024-11-12T12:32:07,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/4a91c05c96a4,36145,1731414726033 2024-11-12T12:32:07,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x1012f080e170003, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T12:32:07,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37105-0x1012f080e170002, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-11-12T12:32:07,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T12:32:07,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33963-0x1012f080e170001, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T12:32:07,060 WARN [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-12T12:32:07,061 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=4a91c05c96a4,36145,1731414726033 2024-11-12T12:32:07,063 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-12T12:32:07,065 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-12T12:32:07,118 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/hbase.id] with ID: 2f196675-6a3a-4be7-a9a4-12d5af8f1f72 2024-11-12T12:32:07,119 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/.tmp/hbase.id 2024-11-12T12:32:07,125 WARN [master/4a91c05c96a4:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T12:32:07,126 WARN [master/4a91c05c96a4:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T12:32:07,129 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1228856340_22 at /127.0.0.1:44514 [Receiving block BP-1258944945-172.17.0.3-1731414721461:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:44645:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44514 dst: /127.0.0.1:44645 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T12:32:07,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44645 is added to blk_-9223372036854775776_1004 (size=42) 2024-11-12T12:32:07,136 WARN [master/4a91c05c96a4:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T12:32:07,136 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/.tmp/hbase.id]:[hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/hbase.id] 2024-11-12T12:32:07,175 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T12:32:07,179 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-12T12:32:07,195 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 15ms. 2024-11-12T12:32:07,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x1012f080e170003, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:07,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:07,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33963-0x1012f080e170001, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:07,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37105-0x1012f080e170002, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:07,221 WARN [master/4a91c05c96a4:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T12:32:07,221 WARN [master/4a91c05c96a4:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). 
Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T12:32:07,224 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1228856340_22 at /127.0.0.1:40128 [Receiving block BP-1258944945-172.17.0.3-1731414721461:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:33531:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40128 dst: /127.0.0.1:33531 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T12:32:07,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33531 is added to blk_-9223372036854775760_1006 (size=196) 2024-11-12T12:32:07,233 WARN [master/4a91c05c96a4:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-12T12:32:07,246 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T12:32:07,247 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-12T12:32:07,252 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-12T12:32:07,278 WARN [master/4a91c05c96a4:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T12:32:07,278 WARN [master/4a91c05c96a4:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T12:32:07,281 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1228856340_22 at /127.0.0.1:44526 [Receiving block BP-1258944945-172.17.0.3-1731414721461:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:44645:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44526 dst: /127.0.0.1:44645 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T12:32:07,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44645 is added to blk_-9223372036854775744_1008 (size=1189) 2024-11-12T12:32:07,287 WARN [master/4a91c05c96a4:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T12:32:07,301 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/MasterData/data/master/store 2024-11-12T12:32:07,315 WARN [master/4a91c05c96a4:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T12:32:07,315 WARN [master/4a91c05c96a4:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T12:32:07,321 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1228856340_22 at /127.0.0.1:44550 [Receiving block BP-1258944945-172.17.0.3-1731414721461:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44645:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44550 dst: /127.0.0.1:44645 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T12:32:07,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44645 is added to blk_-9223372036854775728_1010 (size=34) 2024-11-12T12:32:07,327 WARN [master/4a91c05c96a4:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T12:32:07,331 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-12T12:32:07,333 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T12:32:07,334 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T12:32:07,335 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T12:32:07,335 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T12:32:07,336 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-11-12T12:32:07,336 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T12:32:07,336 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T12:32:07,337 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731414727334Disabling compacts and flushes for region at 1731414727334Disabling writes for close at 1731414727336 (+2 ms)Writing region close event to WAL at 1731414727336Closed at 1731414727336 2024-11-12T12:32:07,339 WARN [master/4a91c05c96a4:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/MasterData/data/master/store/.initializing 2024-11-12T12:32:07,339 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/MasterData/WALs/4a91c05c96a4,36145,1731414726033 2024-11-12T12:32:07,346 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-12T12:32:07,359 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4a91c05c96a4%2C36145%2C1731414726033, suffix=, logDir=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/MasterData/WALs/4a91c05c96a4,36145,1731414726033, archiveDir=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/MasterData/oldWALs, maxLogs=10 2024-11-12T12:32:07,384 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/MasterData/WALs/4a91c05c96a4,36145,1731414726033/4a91c05c96a4%2C36145%2C1731414726033.1731414727363, exclude list is [], retry=0 2024-11-12T12:32:07,401 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T12:32:07,402 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41173,DS-b7175ac8-0d28-4f92-8602-0d68f80c2763,DISK] 2024-11-12T12:32:07,402 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44645,DS-3e50e75d-3f0a-4194-a888-4baa22232be3,DISK] 2024-11-12T12:32:07,402 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33531,DS-0e482dcf-b316-4eb2-adb1-98e5f6297d85,DISK] 2024-11-12T12:32:07,405 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-12T12:32:07,440 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/MasterData/WALs/4a91c05c96a4,36145,1731414726033/4a91c05c96a4%2C36145%2C1731414726033.1731414727363 2024-11-12T12:32:07,440 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42753:42753),(127.0.0.1/127.0.0.1:39749:39749),(127.0.0.1/127.0.0.1:40761:40761)] 2024-11-12T12:32:07,441 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-12T12:32:07,441 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T12:32:07,444 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T12:32:07,444 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T12:32:07,479 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T12:32:07,502 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-12T12:32:07,505 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T12:32:07,508 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T12:32:07,508 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T12:32:07,512 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-12T12:32:07,512 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T12:32:07,513 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T12:32:07,514 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T12:32:07,517 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-12T12:32:07,517 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T12:32:07,518 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T12:32:07,518 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T12:32:07,521 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-12T12:32:07,521 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T12:32:07,522 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T12:32:07,522 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T12:32:07,525 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T12:32:07,526 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T12:32:07,531 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T12:32:07,531 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T12:32:07,534 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T12:32:07,537 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T12:32:07,543 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T12:32:07,545 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61129069, jitterRate=-0.08910588920116425}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T12:32:07,552 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731414727455Initializing all the Stores at 1731414727457 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731414727458 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731414727458Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731414727458Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731414727458Cleaning up temporary data from old regions at 1731414727531 (+73 ms)Region opened successfully at 1731414727552 (+21 ms) 2024-11-12T12:32:07,553 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-12T12:32:07,588 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@95957c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4a91c05c96a4/172.17.0.3:0 2024-11-12T12:32:07,617 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-12T12:32:07,626 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-12T12:32:07,626 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-12T12:32:07,629 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-12T12:32:07,630 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-12T12:32:07,635 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-12T12:32:07,635 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-12T12:32:07,661 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-12T12:32:07,670 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-12T12:32:07,714 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-12T12:32:07,717 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-12T12:32:07,719 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-12T12:32:07,732 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-12T12:32:07,735 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-12T12:32:07,740 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-12T12:32:07,753 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-12T12:32:07,755 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-12T12:32:07,766 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-12T12:32:07,787 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-12T12:32:07,798 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-12T12:32:07,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37105-0x1012f080e170002, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T12:32:07,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T12:32:07,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33963-0x1012f080e170001, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T12:32:07,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:07,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37105-0x1012f080e170002, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:07,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33963-0x1012f080e170001, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:07,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x1012f080e170003, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T12:32:07,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x1012f080e170003, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:07,814 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=4a91c05c96a4,36145,1731414726033, sessionid=0x1012f080e170000, setting cluster-up flag (Was=false) 2024-11-12T12:32:07,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37105-0x1012f080e170002, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:07,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x1012f080e170003, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:07,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-12T12:32:07,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33963-0x1012f080e170001, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:07,872 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-12T12:32:07,877 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=4a91c05c96a4,36145,1731414726033 2024-11-12T12:32:07,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33963-0x1012f080e170001, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:07,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x1012f080e170003, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:07,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37105-0x1012f080e170002, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:07,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:07,935 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-12T12:32:07,937 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=4a91c05c96a4,36145,1731414726033 2024-11-12T12:32:07,945 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-12T12:32:07,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44645 is added to blk_-9223372036854775788_1002 (size=7) 2024-11-12T12:32:07,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41173 is added to blk_-9223372036854775789_1002 (size=7) 2024-11-12T12:32:07,981 INFO [RS:2;4a91c05c96a4:44607 {}] regionserver.HRegionServer(746): ClusterId : 2f196675-6a3a-4be7-a9a4-12d5af8f1f72 2024-11-12T12:32:07,981 INFO [RS:0;4a91c05c96a4:33963 {}] regionserver.HRegionServer(746): ClusterId : 2f196675-6a3a-4be7-a9a4-12d5af8f1f72 2024-11-12T12:32:07,981 INFO [RS:1;4a91c05c96a4:37105 {}] regionserver.HRegionServer(746): ClusterId : 2f196675-6a3a-4be7-a9a4-12d5af8f1f72 2024-11-12T12:32:07,984 DEBUG [RS:2;4a91c05c96a4:44607 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T12:32:07,984 DEBUG [RS:1;4a91c05c96a4:37105 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T12:32:07,984 DEBUG [RS:0;4a91c05c96a4:33963 
{}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T12:32:08,000 DEBUG [RS:0;4a91c05c96a4:33963 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T12:32:08,000 DEBUG [RS:2;4a91c05c96a4:44607 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T12:32:08,000 DEBUG [RS:1;4a91c05c96a4:37105 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T12:32:08,000 DEBUG [RS:0;4a91c05c96a4:33963 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T12:32:08,000 DEBUG [RS:2;4a91c05c96a4:44607 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T12:32:08,000 DEBUG [RS:1;4a91c05c96a4:37105 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T12:32:08,009 DEBUG [RS:0;4a91c05c96a4:33963 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T12:32:08,009 DEBUG [RS:1;4a91c05c96a4:37105 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T12:32:08,009 DEBUG [RS:2;4a91c05c96a4:44607 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T12:32:08,010 DEBUG [RS:0;4a91c05c96a4:33963 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b60cfd2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4a91c05c96a4/172.17.0.3:0 2024-11-12T12:32:08,010 DEBUG [RS:1;4a91c05c96a4:37105 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2acfd1b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4a91c05c96a4/172.17.0.3:0 2024-11-12T12:32:08,010 DEBUG [RS:2;4a91c05c96a4:44607 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36bd8ecd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4a91c05c96a4/172.17.0.3:0 2024-11-12T12:32:08,015 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-12T12:32:08,025 DEBUG [RS:2;4a91c05c96a4:44607 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;4a91c05c96a4:44607 2024-11-12T12:32:08,027 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-12T12:32:08,029 DEBUG [RS:0;4a91c05c96a4:33963 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;4a91c05c96a4:33963 2024-11-12T12:32:08,029 DEBUG [RS:1;4a91c05c96a4:37105 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;4a91c05c96a4:37105 2024-11-12T12:32:08,029 INFO [RS:2;4a91c05c96a4:44607 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-12T12:32:08,029 INFO [RS:0;4a91c05c96a4:33963 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 
2024-11-12T12:32:08,029 INFO [RS:1;4a91c05c96a4:37105 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-12T12:32:08,029 INFO [RS:2;4a91c05c96a4:44607 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-12T12:32:08,029 INFO [RS:0;4a91c05c96a4:33963 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-12T12:32:08,029 INFO [RS:1;4a91c05c96a4:37105 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-12T12:32:08,030 DEBUG [RS:2;4a91c05c96a4:44607 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-12T12:32:08,030 DEBUG [RS:0;4a91c05c96a4:33963 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-12T12:32:08,030 DEBUG [RS:1;4a91c05c96a4:37105 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-12T12:32:08,032 INFO [RS:0;4a91c05c96a4:33963 {}] regionserver.HRegionServer(2659): reportForDuty to master=4a91c05c96a4,36145,1731414726033 with port=33963, startcode=1731414726800 2024-11-12T12:32:08,032 INFO [RS:2;4a91c05c96a4:44607 {}] regionserver.HRegionServer(2659): reportForDuty to master=4a91c05c96a4,36145,1731414726033 with port=44607, startcode=1731414726947 2024-11-12T12:32:08,032 INFO [RS:1;4a91c05c96a4:37105 {}] regionserver.HRegionServer(2659): reportForDuty to master=4a91c05c96a4,36145,1731414726033 with port=37105, startcode=1731414726899 2024-11-12T12:32:08,036 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-12T12:32:08,044 DEBUG [RS:2;4a91c05c96a4:44607 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T12:32:08,044 DEBUG [RS:1;4a91c05c96a4:37105 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T12:32:08,044 DEBUG [RS:0;4a91c05c96a4:33963 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T12:32:08,043 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 4a91c05c96a4,36145,1731414726033 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-12T12:32:08,051 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/4a91c05c96a4:0, corePoolSize=5, maxPoolSize=5 2024-11-12T12:32:08,052 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/4a91c05c96a4:0, corePoolSize=5, maxPoolSize=5 2024-11-12T12:32:08,052 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/4a91c05c96a4:0, corePoolSize=5, maxPoolSize=5 2024-11-12T12:32:08,052 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/4a91c05c96a4:0, corePoolSize=5, maxPoolSize=5 2024-11-12T12:32:08,052 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/4a91c05c96a4:0, corePoolSize=10, maxPoolSize=10 2024-11-12T12:32:08,052 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,052 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/4a91c05c96a4:0, corePoolSize=2, maxPoolSize=2 2024-11-12T12:32:08,053 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,054 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731414758054 2024-11-12T12:32:08,056 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-12T12:32:08,057 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-12T12:32:08,061 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-12T12:32:08,061 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-12T12:32:08,061 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-12T12:32:08,061 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-12T12:32:08,065 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,069 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-12T12:32:08,070 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-12T12:32:08,070 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-12T12:32:08,072 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-12T12:32:08,073 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T12:32:08,073 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-12T12:32:08,074 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-12T12:32:08,077 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/4a91c05c96a4:0:becomeActiveMaster-HFileCleaner.large.0-1731414728075,5,FailOnTimeoutGroup] 2024-11-12T12:32:08,080 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/4a91c05c96a4:0:becomeActiveMaster-HFileCleaner.small.0-1731414728077,5,FailOnTimeoutGroup] 2024-11-12T12:32:08,080 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,080 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T12:32:08,080 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
2024-11-12T12:32:08,081 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-12T12:32:08,081 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,082 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,086 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50989, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T12:32:08,086 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59341, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T12:32:08,086 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40069, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T12:32:08,094 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T12:32:08,094 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-12T12:32:08,097 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36145 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 4a91c05c96a4,37105,1731414726899
2024-11-12T12:32:08,099 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1228856340_22 at /127.0.0.1:44586 [Receiving block BP-1258944945-172.17.0.3-1731414721461:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:44645:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44586 dst: /127.0.0.1:44645
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T12:32:08,100 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36145 {}] master.ServerManager(517): Registering regionserver=4a91c05c96a4,37105,1731414726899
2024-11-12T12:32:08,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44645 is added to blk_-9223372036854775712_1013 (size=1321)
2024-11-12T12:32:08,108 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-12T12:32:08,109 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-12T12:32:08,110 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e 2024-11-12T12:32:08,113 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36145 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 4a91c05c96a4,44607,1731414726947 2024-11-12T12:32:08,113 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36145 {}] master.ServerManager(517): Registering regionserver=4a91c05c96a4,44607,1731414726947 2024-11-12T12:32:08,117 DEBUG [RS:1;4a91c05c96a4:37105 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e 2024-11-12T12:32:08,117 DEBUG [RS:1;4a91c05c96a4:37105 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:32985 2024-11-12T12:32:08,117 DEBUG [RS:1;4a91c05c96a4:37105 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-12T12:32:08,118 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36145 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 4a91c05c96a4,33963,1731414726800 2024-11-12T12:32:08,118 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
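For reference, the descriptor printed above for hbase:meta has the same structure that client code builds for user tables with TableDescriptorBuilder and ColumnFamilyDescriptorBuilder. A rough sketch that mirrors the 'info' family attributes from the log (the table name "example" is an assumption; hbase:meta itself is created internally by InitMetaProcedure, not through this API):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class InfoFamilyDescriptor {
    public static void main(String[] args) {
        // Mirrors the 'info' family printed above: VERSIONS=3, IN_MEMORY=true, BLOCKSIZE=8192,
        // BLOOMFILTER=ROWCOL, DATA_BLOCK_ENCODING=ROW_INDEX_V1.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBlocksize(8192)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .build();
        TableDescriptor table = TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
                .setColumnFamily(info)
                .build();
        System.out.println(table);
    }
}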
2024-11-12T12:32:08,118 DEBUG [RS:2;4a91c05c96a4:44607 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e
2024-11-12T12:32:08,118 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-12T12:32:08,118 DEBUG [RS:2;4a91c05c96a4:44607 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:32985
2024-11-12T12:32:08,118 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36145 {}] master.ServerManager(517): Registering regionserver=4a91c05c96a4,33963,1731414726800
2024-11-12T12:32:08,118 DEBUG [RS:2;4a91c05c96a4:44607 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-12T12:32:08,122 DEBUG [RS:0;4a91c05c96a4:33963 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e
2024-11-12T12:32:08,122 DEBUG [RS:0;4a91c05c96a4:33963 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:32985
2024-11-12T12:32:08,122 DEBUG [RS:0;4a91c05c96a4:33963 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-12T12:32:08,129 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1228856340_22 at /127.0.0.1:44594 [Receiving block BP-1258944945-172.17.0.3-1731414721461:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:44645:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44594 dst: /127.0.0.1:44645
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
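The DFSStripedOutputStream warnings above mean a write under the RS-3-2-1024k erasure coding policy could not place all parity blocks; the message itself attributes this to too few datanodes or racks and points at 'hdfs ec -verifyClusterSetup'. A small sketch, assuming the Hadoop 3 client API, that reads back the effective policy on the test data directory (NameNode address and path are copied from the log, everything else is an assumption):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class CheckEcPolicy {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:32985"), conf)) {
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            Path dir = new Path("/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e");
            ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
            // null means the directory uses plain replication rather than erasure coding.
            System.out.println(dir + " -> " + (policy == null ? "replication" : policy.getName()));
        }
    }
}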
2024-11-12T12:32:08,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T12:32:08,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44645 is added to blk_-9223372036854775696_1015 (size=32) 2024-11-12T12:32:08,137 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T12:32:08,139 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T12:32:08,141 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T12:32:08,145 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T12:32:08,145 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T12:32:08,146 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T12:32:08,147 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-12T12:32:08,150 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-12T12:32:08,150 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T12:32:08,152 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T12:32:08,152 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T12:32:08,155 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T12:32:08,155 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T12:32:08,157 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T12:32:08,157 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T12:32:08,160 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-12T12:32:08,160 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T12:32:08,162 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-12T12:32:08,162 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T12:32:08,164 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/hbase/meta/1588230740 2024-11-12T12:32:08,165 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/hbase/meta/1588230740 2024-11-12T12:32:08,168 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T12:32:08,168 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T12:32:08,170 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T12:32:08,172 DEBUG [RS:2;4a91c05c96a4:44607 {}] zookeeper.ZKUtil(111): regionserver:44607-0x1012f080e170003, quorum=127.0.0.1:54297, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/4a91c05c96a4,44607,1731414726947 2024-11-12T12:32:08,173 DEBUG [RS:0;4a91c05c96a4:33963 {}] zookeeper.ZKUtil(111): regionserver:33963-0x1012f080e170001, quorum=127.0.0.1:54297, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/4a91c05c96a4,33963,1731414726800 2024-11-12T12:32:08,173 WARN [RS:0;4a91c05c96a4:33963 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-12T12:32:08,173 WARN [RS:2;4a91c05c96a4:44607 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-12T12:32:08,173 INFO [RS:0;4a91c05c96a4:33963 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-12T12:32:08,173 INFO [RS:2;4a91c05c96a4:44607 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-12T12:32:08,173 DEBUG [RS:1;4a91c05c96a4:37105 {}] zookeeper.ZKUtil(111): regionserver:37105-0x1012f080e170002, quorum=127.0.0.1:54297, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/4a91c05c96a4,37105,1731414726899 2024-11-12T12:32:08,173 WARN [RS:1;4a91c05c96a4:37105 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-12T12:32:08,173 DEBUG [RS:2;4a91c05c96a4:44607 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/WALs/4a91c05c96a4,44607,1731414726947 2024-11-12T12:32:08,173 DEBUG [RS:0;4a91c05c96a4:33963 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/WALs/4a91c05c96a4,33963,1731414726800 2024-11-12T12:32:08,173 INFO [RS:1;4a91c05c96a4:37105 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-12T12:32:08,173 DEBUG [RS:1;4a91c05c96a4:37105 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/WALs/4a91c05c96a4,37105,1731414726899 2024-11-12T12:32:08,174 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [4a91c05c96a4,44607,1731414726947] 2024-11-12T12:32:08,174 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [4a91c05c96a4,37105,1731414726899] 2024-11-12T12:32:08,174 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [4a91c05c96a4,33963,1731414726800] 2024-11-12T12:32:08,175 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T12:32:08,203 INFO [RS:0;4a91c05c96a4:33963 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T12:32:08,203 INFO [RS:1;4a91c05c96a4:37105 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T12:32:08,203 INFO [RS:2;4a91c05c96a4:44607 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T12:32:08,205 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T12:32:08,206 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61030167, jitterRate=-0.09057964384555817}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T12:32:08,211 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731414728139Initializing all the Stores at 1731414728141 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731414728141Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731414728141Instantiating store for column family {NAME => 'rep_barrier', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731414728141Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731414728141Cleaning up temporary data from old regions at 1731414728168 (+27 ms)Region opened successfully at 1731414728211 (+43 ms) 2024-11-12T12:32:08,211 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-12T12:32:08,212 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-12T12:32:08,212 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-12T12:32:08,212 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T12:32:08,212 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T12:32:08,214 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-12T12:32:08,214 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731414728211Disabling compacts and flushes for region at 1731414728211Disabling writes for close at 1731414728212 (+1 ms)Writing region close event to WAL at 1731414728213 (+1 ms)Closed at 1731414728213 2024-11-12T12:32:08,217 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T12:32:08,217 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-12T12:32:08,222 INFO [RS:0;4a91c05c96a4:33963 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T12:32:08,222 INFO [RS:2;4a91c05c96a4:44607 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T12:32:08,222 INFO [RS:1;4a91c05c96a4:37105 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T12:32:08,224 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-12T12:32:08,227 INFO [RS:2;4a91c05c96a4:44607 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T12:32:08,227 INFO [RS:0;4a91c05c96a4:33963 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T12:32:08,227 INFO [RS:1;4a91c05c96a4:37105 
{}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T12:32:08,228 INFO [RS:2;4a91c05c96a4:44607 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,228 INFO [RS:0;4a91c05c96a4:33963 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,228 INFO [RS:1;4a91c05c96a4:37105 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,229 INFO [RS:2;4a91c05c96a4:44607 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T12:32:08,231 INFO [RS:1;4a91c05c96a4:37105 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T12:32:08,231 INFO [RS:0;4a91c05c96a4:33963 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T12:32:08,235 INFO [RS:0;4a91c05c96a4:33963 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T12:32:08,235 INFO [RS:1;4a91c05c96a4:37105 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T12:32:08,235 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-12T12:32:08,235 INFO [RS:2;4a91c05c96a4:44607 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T12:32:08,237 INFO [RS:1;4a91c05c96a4:37105 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,237 INFO [RS:2;4a91c05c96a4:44607 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,237 INFO [RS:0;4a91c05c96a4:33963 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-12T12:32:08,237 DEBUG [RS:0;4a91c05c96a4:33963 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,237 DEBUG [RS:2;4a91c05c96a4:44607 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,237 DEBUG [RS:0;4a91c05c96a4:33963 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,237 DEBUG [RS:2;4a91c05c96a4:44607 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,238 DEBUG [RS:0;4a91c05c96a4:33963 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,238 DEBUG [RS:2;4a91c05c96a4:44607 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,238 DEBUG [RS:0;4a91c05c96a4:33963 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,238 DEBUG [RS:2;4a91c05c96a4:44607 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,238 DEBUG [RS:0;4a91c05c96a4:33963 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,238 DEBUG [RS:2;4a91c05c96a4:44607 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,238 DEBUG [RS:2;4a91c05c96a4:44607 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/4a91c05c96a4:0, corePoolSize=2, maxPoolSize=2 2024-11-12T12:32:08,238 DEBUG [RS:1;4a91c05c96a4:37105 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,238 DEBUG [RS:2;4a91c05c96a4:44607 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,238 DEBUG [RS:1;4a91c05c96a4:37105 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,238 DEBUG [RS:2;4a91c05c96a4:44607 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,238 DEBUG [RS:1;4a91c05c96a4:37105 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,238 DEBUG [RS:0;4a91c05c96a4:33963 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/4a91c05c96a4:0, corePoolSize=2, maxPoolSize=2 2024-11-12T12:32:08,238 DEBUG [RS:2;4a91c05c96a4:44607 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 
2024-11-12T12:32:08,238 DEBUG [RS:1;4a91c05c96a4:37105 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,238 DEBUG [RS:0;4a91c05c96a4:33963 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,239 DEBUG [RS:1;4a91c05c96a4:37105 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,239 DEBUG [RS:2;4a91c05c96a4:44607 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,239 DEBUG [RS:0;4a91c05c96a4:33963 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,239 DEBUG [RS:1;4a91c05c96a4:37105 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/4a91c05c96a4:0, corePoolSize=2, maxPoolSize=2 2024-11-12T12:32:08,239 DEBUG [RS:0;4a91c05c96a4:33963 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,239 DEBUG [RS:2;4a91c05c96a4:44607 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,239 DEBUG [RS:1;4a91c05c96a4:37105 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,239 DEBUG [RS:1;4a91c05c96a4:37105 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,239 DEBUG [RS:2;4a91c05c96a4:44607 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,239 DEBUG [RS:1;4a91c05c96a4:37105 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,239 DEBUG [RS:2;4a91c05c96a4:44607 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/4a91c05c96a4:0, corePoolSize=3, maxPoolSize=3 2024-11-12T12:32:08,239 DEBUG [RS:1;4a91c05c96a4:37105 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,239 DEBUG [RS:2;4a91c05c96a4:44607 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0, corePoolSize=3, maxPoolSize=3 2024-11-12T12:32:08,239 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-12T12:32:08,240 DEBUG [RS:0;4a91c05c96a4:33963 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/4a91c05c96a4:0, 
corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,239 DEBUG [RS:1;4a91c05c96a4:37105 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,240 DEBUG [RS:0;4a91c05c96a4:33963 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,240 DEBUG [RS:1;4a91c05c96a4:37105 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,240 DEBUG [RS:0;4a91c05c96a4:33963 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:08,240 DEBUG [RS:0;4a91c05c96a4:33963 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/4a91c05c96a4:0, corePoolSize=3, maxPoolSize=3 2024-11-12T12:32:08,240 DEBUG [RS:1;4a91c05c96a4:37105 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/4a91c05c96a4:0, corePoolSize=3, maxPoolSize=3 2024-11-12T12:32:08,240 DEBUG [RS:0;4a91c05c96a4:33963 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0, corePoolSize=3, maxPoolSize=3 2024-11-12T12:32:08,240 DEBUG [RS:1;4a91c05c96a4:37105 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0, corePoolSize=3, maxPoolSize=3 2024-11-12T12:32:08,244 INFO [RS:2;4a91c05c96a4:44607 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,244 INFO [RS:2;4a91c05c96a4:44607 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,245 INFO [RS:2;4a91c05c96a4:44607 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,245 INFO [RS:2;4a91c05c96a4:44607 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,245 INFO [RS:2;4a91c05c96a4:44607 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,245 INFO [RS:2;4a91c05c96a4:44607 {}] hbase.ChoreService(168): Chore ScheduledChore name=4a91c05c96a4,44607,1731414726947-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T12:32:08,248 INFO [RS:0;4a91c05c96a4:33963 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,248 INFO [RS:0;4a91c05c96a4:33963 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,249 INFO [RS:0;4a91c05c96a4:33963 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,249 INFO [RS:0;4a91c05c96a4:33963 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-12T12:32:08,249 INFO [RS:0;4a91c05c96a4:33963 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,249 INFO [RS:0;4a91c05c96a4:33963 {}] hbase.ChoreService(168): Chore ScheduledChore name=4a91c05c96a4,33963,1731414726800-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T12:32:08,252 INFO [RS:1;4a91c05c96a4:37105 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,252 INFO [RS:1;4a91c05c96a4:37105 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,252 INFO [RS:1;4a91c05c96a4:37105 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,253 INFO [RS:1;4a91c05c96a4:37105 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,253 INFO [RS:1;4a91c05c96a4:37105 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,253 INFO [RS:1;4a91c05c96a4:37105 {}] hbase.ChoreService(168): Chore ScheduledChore name=4a91c05c96a4,37105,1731414726899-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T12:32:08,269 INFO [RS:2;4a91c05c96a4:44607 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T12:32:08,271 INFO [RS:1;4a91c05c96a4:37105 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T12:32:08,272 INFO [RS:1;4a91c05c96a4:37105 {}] hbase.ChoreService(168): Chore ScheduledChore name=4a91c05c96a4,37105,1731414726899-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,272 INFO [RS:2;4a91c05c96a4:44607 {}] hbase.ChoreService(168): Chore ScheduledChore name=4a91c05c96a4,44607,1731414726947-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,272 INFO [RS:1;4a91c05c96a4:37105 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,272 INFO [RS:2;4a91c05c96a4:44607 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,272 INFO [RS:1;4a91c05c96a4:37105 {}] regionserver.Replication(171): 4a91c05c96a4,37105,1731414726899 started 2024-11-12T12:32:08,272 INFO [RS:2;4a91c05c96a4:44607 {}] regionserver.Replication(171): 4a91c05c96a4,44607,1731414726947 started 2024-11-12T12:32:08,274 INFO [RS:0;4a91c05c96a4:33963 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T12:32:08,274 INFO [RS:0;4a91c05c96a4:33963 {}] hbase.ChoreService(168): Chore ScheduledChore name=4a91c05c96a4,33963,1731414726800-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,274 INFO [RS:0;4a91c05c96a4:33963 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,274 INFO [RS:0;4a91c05c96a4:33963 {}] regionserver.Replication(171): 4a91c05c96a4,33963,1731414726800 started 2024-11-12T12:32:08,291 INFO [RS:1;4a91c05c96a4:37105 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
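The "ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" lines above are periodic background tasks registered with each region server's ChoreService. As a rough analogue using only the JDK scheduler rather than HBase's ChoreService (the chore name and the 1000 ms period are taken from the log; the rest is an assumption):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class CompactionCheckerAnalogue {
    public static void main(String[] args) {
        ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
        // Same cadence as "ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS".
        pool.scheduleAtFixedRate(
                () -> System.out.println("CompactionChecker tick"),
                0, 1000, TimeUnit.MILLISECONDS);
    }
}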
2024-11-12T12:32:08,291 INFO [RS:1;4a91c05c96a4:37105 {}] regionserver.HRegionServer(1482): Serving as 4a91c05c96a4,37105,1731414726899, RpcServer on 4a91c05c96a4/172.17.0.3:37105, sessionid=0x1012f080e170002 2024-11-12T12:32:08,292 DEBUG [RS:1;4a91c05c96a4:37105 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T12:32:08,292 DEBUG [RS:1;4a91c05c96a4:37105 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 4a91c05c96a4,37105,1731414726899 2024-11-12T12:32:08,292 DEBUG [RS:1;4a91c05c96a4:37105 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4a91c05c96a4,37105,1731414726899' 2024-11-12T12:32:08,292 DEBUG [RS:1;4a91c05c96a4:37105 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T12:32:08,293 DEBUG [RS:1;4a91c05c96a4:37105 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T12:32:08,294 DEBUG [RS:1;4a91c05c96a4:37105 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T12:32:08,294 DEBUG [RS:1;4a91c05c96a4:37105 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T12:32:08,294 DEBUG [RS:1;4a91c05c96a4:37105 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 4a91c05c96a4,37105,1731414726899 2024-11-12T12:32:08,294 DEBUG [RS:1;4a91c05c96a4:37105 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4a91c05c96a4,37105,1731414726899' 2024-11-12T12:32:08,294 DEBUG [RS:1;4a91c05c96a4:37105 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T12:32:08,295 DEBUG [RS:1;4a91c05c96a4:37105 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T12:32:08,296 DEBUG [RS:1;4a91c05c96a4:37105 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T12:32:08,296 INFO [RS:2;4a91c05c96a4:44607 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,296 INFO [RS:1;4a91c05c96a4:37105 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T12:32:08,296 INFO [RS:2;4a91c05c96a4:44607 {}] regionserver.HRegionServer(1482): Serving as 4a91c05c96a4,44607,1731414726947, RpcServer on 4a91c05c96a4/172.17.0.3:44607, sessionid=0x1012f080e170003 2024-11-12T12:32:08,296 INFO [RS:1;4a91c05c96a4:37105 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
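The RegionServerRpcQuotaManager and RegionServerSpaceQuotaManager entries above report quota support as disabled, which reflects the cluster configuration rather than anything the test does. A minimal sketch of the switch involved follows; the key name "hbase.quota.enabled" is the standard quota toggle and is stated here as an assumption, since the log only shows its effect.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaSwitchSketch {
      public static void main(String[] args) {
        // Start from the default HBase configuration used by clients and servers.
        Configuration conf = HBaseConfiguration.create();
        // Assumed key: "hbase.quota.enabled" defaults to false, which is why
        // RegionServerRpcQuotaManager and RegionServerSpaceQuotaManager above
        // both report "Quota support disabled".
        conf.setBoolean("hbase.quota.enabled", true);
        System.out.println("quotas enabled: " + conf.getBoolean("hbase.quota.enabled", false));
      }
    }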
2024-11-12T12:32:08,296 DEBUG [RS:2;4a91c05c96a4:44607 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T12:32:08,296 DEBUG [RS:2;4a91c05c96a4:44607 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 4a91c05c96a4,44607,1731414726947 2024-11-12T12:32:08,296 DEBUG [RS:2;4a91c05c96a4:44607 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4a91c05c96a4,44607,1731414726947' 2024-11-12T12:32:08,296 DEBUG [RS:2;4a91c05c96a4:44607 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T12:32:08,296 INFO [RS:0;4a91c05c96a4:33963 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:08,297 INFO [RS:0;4a91c05c96a4:33963 {}] regionserver.HRegionServer(1482): Serving as 4a91c05c96a4,33963,1731414726800, RpcServer on 4a91c05c96a4/172.17.0.3:33963, sessionid=0x1012f080e170001 2024-11-12T12:32:08,297 DEBUG [RS:0;4a91c05c96a4:33963 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T12:32:08,297 DEBUG [RS:0;4a91c05c96a4:33963 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 4a91c05c96a4,33963,1731414726800 2024-11-12T12:32:08,297 DEBUG [RS:0;4a91c05c96a4:33963 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4a91c05c96a4,33963,1731414726800' 2024-11-12T12:32:08,297 DEBUG [RS:0;4a91c05c96a4:33963 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T12:32:08,297 DEBUG [RS:2;4a91c05c96a4:44607 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T12:32:08,298 DEBUG [RS:2;4a91c05c96a4:44607 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T12:32:08,298 DEBUG [RS:0;4a91c05c96a4:33963 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T12:32:08,298 DEBUG [RS:2;4a91c05c96a4:44607 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T12:32:08,298 DEBUG [RS:2;4a91c05c96a4:44607 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 4a91c05c96a4,44607,1731414726947 2024-11-12T12:32:08,298 DEBUG [RS:2;4a91c05c96a4:44607 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4a91c05c96a4,44607,1731414726947' 2024-11-12T12:32:08,298 DEBUG [RS:2;4a91c05c96a4:44607 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T12:32:08,298 DEBUG [RS:0;4a91c05c96a4:33963 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T12:32:08,298 DEBUG [RS:0;4a91c05c96a4:33963 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T12:32:08,298 DEBUG [RS:0;4a91c05c96a4:33963 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 4a91c05c96a4,33963,1731414726800 2024-11-12T12:32:08,298 DEBUG [RS:0;4a91c05c96a4:33963 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4a91c05c96a4,33963,1731414726800' 2024-11-12T12:32:08,298 DEBUG [RS:2;4a91c05c96a4:44607 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under 
znode:'/hbase/online-snapshot/acquired' 2024-11-12T12:32:08,298 DEBUG [RS:0;4a91c05c96a4:33963 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T12:32:08,299 DEBUG [RS:2;4a91c05c96a4:44607 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T12:32:08,299 DEBUG [RS:0;4a91c05c96a4:33963 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T12:32:08,299 INFO [RS:2;4a91c05c96a4:44607 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T12:32:08,299 INFO [RS:2;4a91c05c96a4:44607 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-12T12:32:08,300 DEBUG [RS:0;4a91c05c96a4:33963 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T12:32:08,300 INFO [RS:0;4a91c05c96a4:33963 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T12:32:08,300 INFO [RS:0;4a91c05c96a4:33963 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-12T12:32:08,391 WARN [4a91c05c96a4:36145 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-12T12:32:08,403 INFO [RS:2;4a91c05c96a4:44607 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-12T12:32:08,403 INFO [RS:0;4a91c05c96a4:33963 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-12T12:32:08,403 INFO [RS:1;4a91c05c96a4:37105 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-12T12:32:08,407 INFO [RS:1;4a91c05c96a4:37105 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4a91c05c96a4%2C37105%2C1731414726899, suffix=, logDir=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/WALs/4a91c05c96a4,37105,1731414726899, archiveDir=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/oldWALs, maxLogs=32 2024-11-12T12:32:08,407 INFO [RS:2;4a91c05c96a4:44607 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4a91c05c96a4%2C44607%2C1731414726947, suffix=, logDir=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/WALs/4a91c05c96a4,44607,1731414726947, archiveDir=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/oldWALs, maxLogs=32 2024-11-12T12:32:08,407 INFO [RS:0;4a91c05c96a4:33963 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4a91c05c96a4%2C33963%2C1731414726800, suffix=, logDir=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/WALs/4a91c05c96a4,33963,1731414726800, archiveDir=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/oldWALs, maxLogs=32 2024-11-12T12:32:08,424 DEBUG [RS:1;4a91c05c96a4:37105 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/WALs/4a91c05c96a4,37105,1731414726899/4a91c05c96a4%2C37105%2C1731414726899.1731414728410, exclude list is [], retry=0 2024-11-12T12:32:08,429 DEBUG [RS:0;4a91c05c96a4:33963 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for 
/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/WALs/4a91c05c96a4,33963,1731414726800/4a91c05c96a4%2C33963%2C1731414726800.1731414728411, exclude list is [], retry=0 2024-11-12T12:32:08,429 DEBUG [RS:2;4a91c05c96a4:44607 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/WALs/4a91c05c96a4,44607,1731414726947/4a91c05c96a4%2C44607%2C1731414726947.1731414728411, exclude list is [], retry=0 2024-11-12T12:32:08,429 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44645,DS-3e50e75d-3f0a-4194-a888-4baa22232be3,DISK] 2024-11-12T12:32:08,429 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41173,DS-b7175ac8-0d28-4f92-8602-0d68f80c2763,DISK] 2024-11-12T12:32:08,429 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33531,DS-0e482dcf-b316-4eb2-adb1-98e5f6297d85,DISK] 2024-11-12T12:32:08,462 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41173,DS-b7175ac8-0d28-4f92-8602-0d68f80c2763,DISK] 2024-11-12T12:32:08,462 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44645,DS-3e50e75d-3f0a-4194-a888-4baa22232be3,DISK] 2024-11-12T12:32:08,462 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33531,DS-0e482dcf-b316-4eb2-adb1-98e5f6297d85,DISK] 2024-11-12T12:32:08,463 INFO [RS:1;4a91c05c96a4:37105 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/WALs/4a91c05c96a4,37105,1731414726899/4a91c05c96a4%2C37105%2C1731414726899.1731414728410 2024-11-12T12:32:08,464 DEBUG [RS:1;4a91c05c96a4:37105 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42753:42753),(127.0.0.1/127.0.0.1:40761:40761),(127.0.0.1/127.0.0.1:39749:39749)] 2024-11-12T12:32:08,465 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41173,DS-b7175ac8-0d28-4f92-8602-0d68f80c2763,DISK] 2024-11-12T12:32:08,465 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:44645,DS-3e50e75d-3f0a-4194-a888-4baa22232be3,DISK] 2024-11-12T12:32:08,466 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33531,DS-0e482dcf-b316-4eb2-adb1-98e5f6297d85,DISK] 2024-11-12T12:32:08,470 INFO [RS:0;4a91c05c96a4:33963 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/WALs/4a91c05c96a4,33963,1731414726800/4a91c05c96a4%2C33963%2C1731414726800.1731414728411 2024-11-12T12:32:08,472 DEBUG [RS:0;4a91c05c96a4:33963 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40761:40761),(127.0.0.1/127.0.0.1:39749:39749),(127.0.0.1/127.0.0.1:42753:42753)] 2024-11-12T12:32:08,473 INFO [RS:2;4a91c05c96a4:44607 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/WALs/4a91c05c96a4,44607,1731414726947/4a91c05c96a4%2C44607%2C1731414726947.1731414728411 2024-11-12T12:32:08,474 DEBUG [RS:2;4a91c05c96a4:44607 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42753:42753),(127.0.0.1/127.0.0.1:39749:39749),(127.0.0.1/127.0.0.1:40761:40761)] 2024-11-12T12:32:08,647 DEBUG [4a91c05c96a4:36145 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-12T12:32:08,657 DEBUG [4a91c05c96a4:36145 {}] balancer.BalancerClusterState(204): Hosts are {4a91c05c96a4=0} racks are {/default-rack=0} 2024-11-12T12:32:08,663 DEBUG [4a91c05c96a4:36145 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-12T12:32:08,663 DEBUG [4a91c05c96a4:36145 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-12T12:32:08,663 DEBUG [4a91c05c96a4:36145 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-12T12:32:08,663 DEBUG [4a91c05c96a4:36145 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-12T12:32:08,663 DEBUG [4a91c05c96a4:36145 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-12T12:32:08,663 DEBUG [4a91c05c96a4:36145 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-12T12:32:08,664 INFO [4a91c05c96a4:36145 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-12T12:32:08,664 INFO [4a91c05c96a4:36145 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-12T12:32:08,664 INFO [4a91c05c96a4:36145 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-12T12:32:08,664 DEBUG [4a91c05c96a4:36145 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-12T12:32:08,670 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=4a91c05c96a4,33963,1731414726800 2024-11-12T12:32:08,675 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 4a91c05c96a4,33963,1731414726800, state=OPENING 2024-11-12T12:32:08,724 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-12T12:32:08,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33963-0x1012f080e170001, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase 2024-11-12T12:32:08,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37105-0x1012f080e170002, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:08,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:08,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x1012f080e170003, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:08,736 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T12:32:08,736 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T12:32:08,736 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T12:32:08,736 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T12:32:08,737 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-12T12:32:08,739 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=4a91c05c96a4,33963,1731414726800}] 2024-11-12T12:32:08,913 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-12T12:32:08,915 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34823, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-12T12:32:08,926 INFO [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-12T12:32:08,926 INFO [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-12T12:32:08,927 INFO [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-12T12:32:08,930 INFO [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4a91c05c96a4%2C33963%2C1731414726800.meta, suffix=.meta, logDir=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/WALs/4a91c05c96a4,33963,1731414726800, archiveDir=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/oldWALs, maxLogs=32 2024-11-12T12:32:08,945 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): 
When create output stream for /user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/WALs/4a91c05c96a4,33963,1731414726800/4a91c05c96a4%2C33963%2C1731414726800.meta.1731414728932.meta, exclude list is [], retry=0 2024-11-12T12:32:08,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41173,DS-b7175ac8-0d28-4f92-8602-0d68f80c2763,DISK] 2024-11-12T12:32:08,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33531,DS-0e482dcf-b316-4eb2-adb1-98e5f6297d85,DISK] 2024-11-12T12:32:08,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44645,DS-3e50e75d-3f0a-4194-a888-4baa22232be3,DISK] 2024-11-12T12:32:08,954 INFO [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/WALs/4a91c05c96a4,33963,1731414726800/4a91c05c96a4%2C33963%2C1731414726800.meta.1731414728932.meta 2024-11-12T12:32:08,955 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40761:40761),(127.0.0.1/127.0.0.1:39749:39749),(127.0.0.1/127.0.0.1:42753:42753)] 2024-11-12T12:32:08,955 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-12T12:32:08,957 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-12T12:32:08,959 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-12T12:32:08,964 INFO [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
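The WAL configuration entries above report blocksize=256 MB, rollsize=128 MB and maxLogs=32 for both the regular and the hbase:meta WAL. A rough sketch of setting those values explicitly is shown below; the property names (hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier, hbase.regionserver.maxlogs) are the usual keys for these settings and should be treated as assumptions, since the log only prints the resulting values.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalSettingsSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // 256 MB WAL block size, matching "blocksize=256 MB" in AbstractFSWAL(613) above.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        // Roll at half the block size, which yields the logged "rollsize=128 MB".
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // At most 32 un-archived WAL files per region server ("maxLogs=32").
        conf.setInt("hbase.regionserver.maxlogs", 32);
      }
    }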
2024-11-12T12:32:08,968 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-12T12:32:08,968 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T12:32:08,968 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-12T12:32:08,968 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-12T12:32:08,972 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T12:32:08,973 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T12:32:08,973 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T12:32:08,974 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T12:32:08,974 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-12T12:32:08,976 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-12T12:32:08,976 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T12:32:08,976 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T12:32:08,977 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T12:32:08,978 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T12:32:08,978 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T12:32:08,979 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T12:32:08,979 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T12:32:08,980 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-12T12:32:08,980 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T12:32:08,981 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-12T12:32:08,981 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T12:32:08,983 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/hbase/meta/1588230740 2024-11-12T12:32:08,986 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/hbase/meta/1588230740 2024-11-12T12:32:08,988 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T12:32:08,989 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T12:32:08,989 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T12:32:08,992 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T12:32:08,993 INFO [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74989127, jitterRate=0.11742506921291351}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T12:32:08,993 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-12T12:32:08,995 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731414728969Writing region info on filesystem at 1731414728969Initializing all the Stores at 1731414728971 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731414728971Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731414728971Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731414728972 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731414728972Cleaning up temporary data from old regions at 1731414728989 (+17 ms)Running coprocessor post-open hooks at 1731414728993 (+4 ms)Region opened successfully at 1731414728995 (+2 ms) 2024-11-12T12:32:09,000 INFO [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731414728906 2024-11-12T12:32:09,015 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-12T12:32:09,015 INFO [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-12T12:32:09,016 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=4a91c05c96a4,33963,1731414726800 2024-11-12T12:32:09,019 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 4a91c05c96a4,33963,1731414726800, state=OPEN 2024-11-12T12:32:09,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x1012f080e170003, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T12:32:09,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33963-0x1012f080e170001, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T12:32:09,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37105-0x1012f080e170002, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T12:32:09,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T12:32:09,029 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T12:32:09,029 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T12:32:09,029 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T12:32:09,029 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T12:32:09,030 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, 
state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=4a91c05c96a4,33963,1731414726800 2024-11-12T12:32:09,035 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-12T12:32:09,035 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=4a91c05c96a4,33963,1731414726800 in 291 msec 2024-11-12T12:32:09,043 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-12T12:32:09,043 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 813 msec 2024-11-12T12:32:09,044 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T12:32:09,045 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-12T12:32:09,061 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T12:32:09,062 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4a91c05c96a4,33963,1731414726800, seqNum=-1] 2024-11-12T12:32:09,079 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T12:32:09,081 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48311, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T12:32:09,118 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1240 sec 2024-11-12T12:32:09,119 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731414729119, completionTime=-1 2024-11-12T12:32:09,122 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-12T12:32:09,122 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 
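The master-side PEWorker above fetches the hbase:meta location from the connection registry and receives [region=hbase:meta,,1.1588230740, hostname=4a91c05c96a4,33963,1731414726800, seqNum=-1]. The same lookup is available to any client through the public API; a small sketch follows, with the ZooKeeper quorum values copied from the ZKWatcher lines of this log and everything else illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Quorum and client port as they appear in this log (127.0.0.1:54297).
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "54297");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          // Resolves the single hbase:meta region, matching the
          // "fetched meta region location" entry above.
          HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
          System.out.println(loc.getRegion().getRegionNameAsString() + " is on " + loc.getServerName());
        }
      }
    }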
2024-11-12T12:32:09,150 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-12T12:32:09,150 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731414789150 2024-11-12T12:32:09,150 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731414849150 2024-11-12T12:32:09,150 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 28 msec 2024-11-12T12:32:09,151 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-12T12:32:09,157 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4a91c05c96a4,36145,1731414726033-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:09,157 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4a91c05c96a4,36145,1731414726033-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:09,157 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4a91c05c96a4,36145,1731414726033-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:09,159 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-4a91c05c96a4:36145, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:09,159 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:09,160 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:09,164 DEBUG [master/4a91c05c96a4:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-12T12:32:09,184 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.123sec 2024-11-12T12:32:09,185 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-12T12:32:09,186 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-12T12:32:09,187 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-12T12:32:09,188 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-12T12:32:09,188 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-12T12:32:09,188 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4a91c05c96a4,36145,1731414726033-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T12:32:09,189 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4a91c05c96a4,36145,1731414726033-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-12T12:32:09,193 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-12T12:32:09,194 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-12T12:32:09,194 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4a91c05c96a4,36145,1731414726033-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:09,297 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f14512f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T12:32:09,300 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-12T12:32:09,300 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-12T12:32:09,303 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 4a91c05c96a4,36145,-1 for getting cluster id 2024-11-12T12:32:09,305 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-12T12:32:09,312 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2f196675-6a3a-4be7-a9a4-12d5af8f1f72' 2024-11-12T12:32:09,314 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-12T12:32:09,315 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2f196675-6a3a-4be7-a9a4-12d5af8f1f72" 2024-11-12T12:32:09,315 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@416f62f9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T12:32:09,315 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4a91c05c96a4,36145,-1] 2024-11-12T12:32:09,317 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-12T12:32:09,319 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T12:32:09,320 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48770, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
2024-11-12T12:32:09,322 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16c8a73d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T12:32:09,323 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T12:32:09,329 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4a91c05c96a4,33963,1731414726800, seqNum=-1] 2024-11-12T12:32:09,329 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T12:32:09,332 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35464, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T12:32:09,350 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=4a91c05c96a4,36145,1731414726033 2024-11-12T12:32:09,354 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-12T12:32:09,358 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 4a91c05c96a4,36145,1731414726033 2024-11-12T12:32:09,360 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@26332ca6 2024-11-12T12:32:09,361 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-12T12:32:09,363 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48786, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-12T12:32:09,370 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36145 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T12:32:09,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36145 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-12T12:32:09,380 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-12T12:32:09,382 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36145 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-12T12:32:09,382 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T12:32:09,385 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-12T12:32:09,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36145 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T12:32:09,393 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T12:32:09,394 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T12:32:09,399 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1228856340_22 at /127.0.0.1:52710 [Receiving block BP-1258944945-172.17.0.3-1731414721461:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:41173:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52710 dst: /127.0.0.1:41173 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T12:32:09,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41173 is added to blk_-9223372036854775680_1021 (size=392) 2024-11-12T12:32:09,406 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-12T12:32:09,408 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 87c2f99e9a002bd4dcc44f717be73b0b, NAME => 'TestHBaseWalOnEC,,1731414729365.87c2f99e9a002bd4dcc44f717be73b0b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e 2024-11-12T12:32:09,414 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T12:32:09,415 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T12:32:09,417 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1228856340_22 at /127.0.0.1:52730 [Receiving block BP-1258944945-172.17.0.3-1731414721461:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:41173:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52730 dst: /127.0.0.1:41173 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T12:32:09,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41173 is added to blk_-9223372036854775664_1023 (size=51) 2024-11-12T12:32:09,422 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
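The two DFSStripedOutputStream warnings above occur because the directory being written carries the RS-3-2-1024k erasure coding policy while this mini-DFS cluster has only three datanodes, so the two parity blocks cannot be placed; the DataXceiver "Premature EOF" errors that follow are most likely the striped writer abandoning those partially set-up block streams. The log itself points at 'hdfs ec -verifyClusterSetup'; a programmatic way to inspect the policy on the test data directory is sketched below, with the NameNode address and path taken from this log and the rest assumed.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class EcPolicyCheckSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode address as it appears in the WAL/archive directories above.
        try (DistributedFileSystem dfs =
                 (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:32985"), conf)) {
          Path dir = new Path("/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e");
          ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
          if (policy == null) {
            System.out.println(dir + " is plain replicated (no EC policy)");
          } else {
            // RS-3-2-1024k means 3 data + 2 parity cells per stripe, so a full
            // block group needs at least five datanodes to place every block.
            System.out.println(dir + " uses " + policy.getName() + ": "
                + policy.getNumDataUnits() + " data + " + policy.getNumParityUnits() + " parity");
          }
        }
      }
    }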
2024-11-12T12:32:09,422 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731414729365.87c2f99e9a002bd4dcc44f717be73b0b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T12:32:09,423 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 87c2f99e9a002bd4dcc44f717be73b0b, disabling compactions & flushes 2024-11-12T12:32:09,423 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731414729365.87c2f99e9a002bd4dcc44f717be73b0b. 2024-11-12T12:32:09,423 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731414729365.87c2f99e9a002bd4dcc44f717be73b0b. 2024-11-12T12:32:09,423 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731414729365.87c2f99e9a002bd4dcc44f717be73b0b. after waiting 0 ms 2024-11-12T12:32:09,423 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731414729365.87c2f99e9a002bd4dcc44f717be73b0b. 2024-11-12T12:32:09,423 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731414729365.87c2f99e9a002bd4dcc44f717be73b0b. 2024-11-12T12:32:09,423 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 87c2f99e9a002bd4dcc44f717be73b0b: Waiting for close lock at 1731414729423Disabling compacts and flushes for region at 1731414729423Disabling writes for close at 1731414729423Writing region close event to WAL at 1731414729423Closed at 1731414729423 2024-11-12T12:32:09,426 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-12T12:32:09,432 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731414729365.87c2f99e9a002bd4dcc44f717be73b0b.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731414729426"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731414729426"}]},"ts":"1731414729426"} 2024-11-12T12:32:09,437 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
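Aside: the CreateTableProcedure states logged above (CREATE_TABLE_WRITE_FS_LAYOUT, CREATE_TABLE_ADD_TO_META) are what the master runs in response to a client create-table request. Below is a minimal client-side sketch of creating a table shaped like the one in this log (a single 'cf' family with VERSIONS => 1); the connection setup is an assumption (hbase-site.xml on the classpath), not something read from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: create a table equivalent to TestHBaseWalOnEC; the master then
// drives the CreateTableProcedure states seen in the log.
public class CreateTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes client config is available
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1) // VERSIONS => '1' in the descriptor logged above
              .build())
          .build();
      if (!admin.tableExists(desc.getTableName())) {
        admin.createTable(desc); // returns once the create-table procedure completes
      }
    }
  }
}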
2024-11-12T12:32:09,439 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-12T12:32:09,443 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731414729439"}]},"ts":"1731414729439"} 2024-11-12T12:32:09,447 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-12T12:32:09,448 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {4a91c05c96a4=0} racks are {/default-rack=0} 2024-11-12T12:32:09,449 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-12T12:32:09,450 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-12T12:32:09,450 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-12T12:32:09,450 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-12T12:32:09,450 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-12T12:32:09,450 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-12T12:32:09,450 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-12T12:32:09,450 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-12T12:32:09,450 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-12T12:32:09,450 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-12T12:32:09,452 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=87c2f99e9a002bd4dcc44f717be73b0b, ASSIGN}] 2024-11-12T12:32:09,455 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=87c2f99e9a002bd4dcc44f717be73b0b, ASSIGN 2024-11-12T12:32:09,457 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=87c2f99e9a002bd4dcc44f717be73b0b, ASSIGN; state=OFFLINE, location=4a91c05c96a4,44607,1731414726947; forceNewPlan=false, retain=false 2024-11-12T12:32:09,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36145 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T12:32:09,610 INFO [4a91c05c96a4:36145 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
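Aside: once the TransitRegionStateProcedure initialized above finishes, a client can resolve where the table's single region landed. A small sketch, assuming an already open Connection as in the previous example, of listing region locations; with one unsplit region it prints a single entry of the same form the client-side locator reports later in this log.

import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

// Sketch: print the assigned location of each region of TestHBaseWalOnEC.
public class PrintRegionLocations {
  static void printLocations(Connection conn) throws Exception {
    try (RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("TestHBaseWalOnEC"))) {
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      for (HRegionLocation loc : locations) {
        System.out.println(loc.getRegion().getEncodedName()
            + " -> " + loc.getServerName());
      }
    }
  }
}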
2024-11-12T12:32:09,611 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=87c2f99e9a002bd4dcc44f717be73b0b, regionState=OPENING, regionLocation=4a91c05c96a4,44607,1731414726947 2024-11-12T12:32:09,616 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=87c2f99e9a002bd4dcc44f717be73b0b, ASSIGN because future has completed 2024-11-12T12:32:09,617 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 87c2f99e9a002bd4dcc44f717be73b0b, server=4a91c05c96a4,44607,1731414726947}] 2024-11-12T12:32:09,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36145 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T12:32:09,772 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-12T12:32:09,774 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34481, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-12T12:32:09,785 INFO [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731414729365.87c2f99e9a002bd4dcc44f717be73b0b. 2024-11-12T12:32:09,785 DEBUG [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 87c2f99e9a002bd4dcc44f717be73b0b, NAME => 'TestHBaseWalOnEC,,1731414729365.87c2f99e9a002bd4dcc44f717be73b0b.', STARTKEY => '', ENDKEY => ''} 2024-11-12T12:32:09,786 DEBUG [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 87c2f99e9a002bd4dcc44f717be73b0b 2024-11-12T12:32:09,786 DEBUG [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731414729365.87c2f99e9a002bd4dcc44f717be73b0b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T12:32:09,786 DEBUG [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 87c2f99e9a002bd4dcc44f717be73b0b 2024-11-12T12:32:09,786 DEBUG [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 87c2f99e9a002bd4dcc44f717be73b0b 2024-11-12T12:32:09,789 INFO [StoreOpener-87c2f99e9a002bd4dcc44f717be73b0b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 87c2f99e9a002bd4dcc44f717be73b0b 2024-11-12T12:32:09,791 INFO [StoreOpener-87c2f99e9a002bd4dcc44f717be73b0b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 87c2f99e9a002bd4dcc44f717be73b0b columnFamilyName cf 2024-11-12T12:32:09,791 DEBUG [StoreOpener-87c2f99e9a002bd4dcc44f717be73b0b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T12:32:09,792 INFO [StoreOpener-87c2f99e9a002bd4dcc44f717be73b0b-1 {}] regionserver.HStore(327): Store=87c2f99e9a002bd4dcc44f717be73b0b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T12:32:09,793 DEBUG [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 87c2f99e9a002bd4dcc44f717be73b0b 2024-11-12T12:32:09,794 DEBUG [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/default/TestHBaseWalOnEC/87c2f99e9a002bd4dcc44f717be73b0b 2024-11-12T12:32:09,795 DEBUG [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/default/TestHBaseWalOnEC/87c2f99e9a002bd4dcc44f717be73b0b 2024-11-12T12:32:09,795 DEBUG [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 87c2f99e9a002bd4dcc44f717be73b0b 2024-11-12T12:32:09,795 DEBUG [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 87c2f99e9a002bd4dcc44f717be73b0b 2024-11-12T12:32:09,798 DEBUG [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 87c2f99e9a002bd4dcc44f717be73b0b 2024-11-12T12:32:09,803 DEBUG [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/default/TestHBaseWalOnEC/87c2f99e9a002bd4dcc44f717be73b0b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T12:32:09,804 INFO [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 87c2f99e9a002bd4dcc44f717be73b0b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67004706, jitterRate=-0.0015520751476287842}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-12T12:32:09,804 DEBUG [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 87c2f99e9a002bd4dcc44f717be73b0b 2024-11-12T12:32:09,805 DEBUG [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 87c2f99e9a002bd4dcc44f717be73b0b: Running coprocessor pre-open hook at 1731414729786Writing region info on filesystem at 1731414729786Initializing all the Stores at 1731414729789 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731414729789Cleaning up temporary data from old regions at 1731414729795 (+6 ms)Running coprocessor post-open hooks at 1731414729804 (+9 ms)Region opened successfully at 1731414729805 (+1 ms) 2024-11-12T12:32:09,807 INFO [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731414729365.87c2f99e9a002bd4dcc44f717be73b0b., pid=6, masterSystemTime=1731414729771 2024-11-12T12:32:09,810 DEBUG [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731414729365.87c2f99e9a002bd4dcc44f717be73b0b. 2024-11-12T12:32:09,810 INFO [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731414729365.87c2f99e9a002bd4dcc44f717be73b0b. 2024-11-12T12:32:09,811 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=87c2f99e9a002bd4dcc44f717be73b0b, regionState=OPEN, openSeqNum=2, regionLocation=4a91c05c96a4,44607,1731414726947 2024-11-12T12:32:09,814 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 87c2f99e9a002bd4dcc44f717be73b0b, server=4a91c05c96a4,44607,1731414726947 because future has completed 2024-11-12T12:32:09,820 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-12T12:32:09,820 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 87c2f99e9a002bd4dcc44f717be73b0b, server=4a91c05c96a4,44607,1731414726947 in 199 msec 2024-11-12T12:32:09,823 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-12T12:32:09,823 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=87c2f99e9a002bd4dcc44f717be73b0b, ASSIGN in 368 msec 2024-11-12T12:32:09,825 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-12T12:32:09,825 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731414729825"}]},"ts":"1731414729825"} 2024-11-12T12:32:09,827 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-12T12:32:09,829 INFO [PEWorker-1 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-12T12:32:09,832 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 456 msec 2024-11-12T12:32:10,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36145 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T12:32:10,020 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-12T12:32:10,020 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-12T12:32:10,022 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-12T12:32:10,030 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-12T12:32:10,031 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-12T12:32:10,031 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-11-12T12:32:10,044 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731414729365.87c2f99e9a002bd4dcc44f717be73b0b., hostname=4a91c05c96a4,44607,1731414726947, seqNum=2] 2024-11-12T12:32:10,045 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T12:32:10,047 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42672, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T12:32:10,055 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36145 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestHBaseWalOnEC 2024-11-12T12:32:10,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36145 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-12T12:32:10,061 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-12T12:32:10,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36145 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T12:32:10,063 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T12:32:10,065 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T12:32:10,167 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36145 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T12:32:10,234 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44607 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-12T12:32:10,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731414729365.87c2f99e9a002bd4dcc44f717be73b0b. 2024-11-12T12:32:10,239 INFO [RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 87c2f99e9a002bd4dcc44f717be73b0b 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-12T12:32:10,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/default/TestHBaseWalOnEC/87c2f99e9a002bd4dcc44f717be73b0b/.tmp/cf/f05aba11613548e6836f2e2dc2366979 is 36, key is row/cf:cq/1731414730048/Put/seqid=0 2024-11-12T12:32:10,303 WARN [RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T12:32:10,303 WARN [RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T12:32:10,307 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1043053653_22 at /127.0.0.1:52756 [Receiving block BP-1258944945-172.17.0.3-1731414721461:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:41173:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52756 dst: /127.0.0.1:41173 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T12:32:10,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41173 is added to blk_-9223372036854775648_1025 (size=4787) 2024-11-12T12:32:10,315 WARN [RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T12:32:10,315 INFO [RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/default/TestHBaseWalOnEC/87c2f99e9a002bd4dcc44f717be73b0b/.tmp/cf/f05aba11613548e6836f2e2dc2366979 2024-11-12T12:32:10,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/default/TestHBaseWalOnEC/87c2f99e9a002bd4dcc44f717be73b0b/.tmp/cf/f05aba11613548e6836f2e2dc2366979 as hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/default/TestHBaseWalOnEC/87c2f99e9a002bd4dcc44f717be73b0b/cf/f05aba11613548e6836f2e2dc2366979 2024-11-12T12:32:10,373 INFO [RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/default/TestHBaseWalOnEC/87c2f99e9a002bd4dcc44f717be73b0b/cf/f05aba11613548e6836f2e2dc2366979, entries=1, sequenceid=5, filesize=4.7 K 2024-11-12T12:32:10,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36145 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T12:32:10,381 INFO [RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 87c2f99e9a002bd4dcc44f717be73b0b in 140ms, sequenceid=5, compaction requested=false 2024-11-12T12:32:10,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-11-12T12:32:10,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 87c2f99e9a002bd4dcc44f717be73b0b: 2024-11-12T12:32:10,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731414729365.87c2f99e9a002bd4dcc44f717be73b0b. 
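Aside: the FlushRegionProcedure above ran after a single cell (row/cf:cq) was written to the table. A hedged sketch of the equivalent client-side put-then-flush sequence; the cell value and the connection are assumptions, and the table setup is as in the earlier sketches.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: write one cell, then ask the master to flush the table, which
// produces a FlushTableProcedure/FlushRegionProcedure pair like pid=7/pid=8
// above and a small HFile in the 'cf' store.
public class PutAndFlush {
  static void putAndFlush(Connection conn) throws Exception {
    TableName name = TableName.valueOf("TestHBaseWalOnEC");
    try (Table table = conn.getTable(name); Admin admin = conn.getAdmin()) {
      Put put = new Put(Bytes.toBytes("row"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value"));
      table.put(put);
      admin.flush(name); // returns once the flush has completed on the server side
    }
  }
}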
2024-11-12T12:32:10,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-12T12:32:10,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36145 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-12T12:32:10,392 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-12T12:32:10,392 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 324 msec 2024-11-12T12:32:10,397 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 338 msec 2024-11-12T12:32:10,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36145 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T12:32:10,687 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-12T12:32:10,700 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-12T12:32:10,700 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-12T12:32:10,700 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T12:32:10,705 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T12:32:10,705 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T12:32:10,705 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-12T12:32:10,706 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-12T12:32:10,706 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=887671035, stopped=false 2024-11-12T12:32:10,706 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=4a91c05c96a4,36145,1731414726033 2024-11-12T12:32:10,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x1012f080e170003, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T12:32:10,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T12:32:10,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37105-0x1012f080e170002, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T12:32:10,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33963-0x1012f080e170001, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T12:32:10,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:10,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x1012f080e170003, 
quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:10,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37105-0x1012f080e170002, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:10,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33963-0x1012f080e170001, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:10,830 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T12:32:10,832 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-12T12:32:10,832 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44607-0x1012f080e170003, quorum=127.0.0.1:54297, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T12:32:10,832 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T12:32:10,832 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33963-0x1012f080e170001, quorum=127.0.0.1:54297, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T12:32:10,832 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T12:32:10,832 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37105-0x1012f080e170002, quorum=127.0.0.1:54297, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T12:32:10,832 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T12:32:10,834 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '4a91c05c96a4,33963,1731414726800' ***** 2024-11-12T12:32:10,834 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T12:32:10,834 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '4a91c05c96a4,37105,1731414726899' ***** 2024-11-12T12:32:10,834 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T12:32:10,835 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '4a91c05c96a4,44607,1731414726947' ***** 2024-11-12T12:32:10,835 INFO [RS:0;4a91c05c96a4:33963 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T12:32:10,835 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T12:32:10,835 INFO [RS:1;4a91c05c96a4:37105 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T12:32:10,835 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T12:32:10,835 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T12:32:10,835 INFO [RS:2;4a91c05c96a4:44607 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T12:32:10,836 INFO [RS:1;4a91c05c96a4:37105 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure 
manager gracefully. 2024-11-12T12:32:10,836 INFO [RS:2;4a91c05c96a4:44607 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T12:32:10,836 INFO [RS:0;4a91c05c96a4:33963 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T12:32:10,836 INFO [RS:1;4a91c05c96a4:37105 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-12T12:32:10,836 INFO [RS:0;4a91c05c96a4:33963 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-12T12:32:10,836 INFO [RS:2;4a91c05c96a4:44607 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-12T12:32:10,836 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T12:32:10,836 INFO [RS:1;4a91c05c96a4:37105 {}] regionserver.HRegionServer(959): stopping server 4a91c05c96a4,37105,1731414726899 2024-11-12T12:32:10,836 INFO [RS:0;4a91c05c96a4:33963 {}] regionserver.HRegionServer(959): stopping server 4a91c05c96a4,33963,1731414726800 2024-11-12T12:32:10,836 INFO [RS:2;4a91c05c96a4:44607 {}] regionserver.HRegionServer(3091): Received CLOSE for 87c2f99e9a002bd4dcc44f717be73b0b 2024-11-12T12:32:10,836 INFO [RS:1;4a91c05c96a4:37105 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T12:32:10,836 INFO [RS:0;4a91c05c96a4:33963 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T12:32:10,836 INFO [RS:1;4a91c05c96a4:37105 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;4a91c05c96a4:37105. 2024-11-12T12:32:10,836 INFO [RS:0;4a91c05c96a4:33963 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;4a91c05c96a4:33963. 
2024-11-12T12:32:10,836 DEBUG [RS:0;4a91c05c96a4:33963 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T12:32:10,836 DEBUG [RS:1;4a91c05c96a4:37105 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T12:32:10,836 DEBUG [RS:1;4a91c05c96a4:37105 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T12:32:10,836 DEBUG [RS:0;4a91c05c96a4:33963 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T12:32:10,837 INFO [RS:2;4a91c05c96a4:44607 {}] regionserver.HRegionServer(959): stopping server 4a91c05c96a4,44607,1731414726947 2024-11-12T12:32:10,837 INFO [RS:2;4a91c05c96a4:44607 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T12:32:10,837 INFO [RS:0;4a91c05c96a4:33963 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
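Aside: the shutdown sequence above is driven by HBaseTestingUtil.shutdownMiniCluster() from the test's tearDown, as the call stacks show. A minimal JUnit 4-style sketch of that lifecycle; the field name and the comment about three region servers are assumptions drawn from this log, not the test's actual source.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;

// Sketch of the minicluster lifecycle visible in this log: bring up the
// mini HDFS/ZooKeeper/HBase cluster before the tests, tear it down after.
public class MiniClusterLifecycle {
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUp() throws Exception {
    // The logged run has three region servers (RS:0..RS:2); cluster sizing
    // options are omitted here for brevity.
    UTIL.startMiniCluster();
  }

  @AfterClass
  public static void tearDown() throws Exception {
    // Closes the async connection, stops master and region servers, then
    // tears down DFS/ZK -- the "Shutting down minicluster" path above.
    UTIL.shutdownMiniCluster();
  }
}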
2024-11-12T12:32:10,837 INFO [RS:1;4a91c05c96a4:37105 {}] regionserver.HRegionServer(976): stopping server 4a91c05c96a4,37105,1731414726899; all regions closed. 2024-11-12T12:32:10,837 INFO [RS:2;4a91c05c96a4:44607 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;4a91c05c96a4:44607. 2024-11-12T12:32:10,837 INFO [RS:0;4a91c05c96a4:33963 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T12:32:10,837 INFO [RS:0;4a91c05c96a4:33963 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-12T12:32:10,837 DEBUG [RS:2;4a91c05c96a4:44607 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T12:32:10,837 DEBUG [RS:2;4a91c05c96a4:44607 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T12:32:10,837 INFO [RS:0;4a91c05c96a4:33963 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-12T12:32:10,837 INFO [RS:2;4a91c05c96a4:44607 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-12T12:32:10,837 DEBUG [RS_CLOSE_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 87c2f99e9a002bd4dcc44f717be73b0b, disabling compactions & flushes 2024-11-12T12:32:10,837 INFO [RS_CLOSE_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731414729365.87c2f99e9a002bd4dcc44f717be73b0b. 2024-11-12T12:32:10,837 DEBUG [RS:2;4a91c05c96a4:44607 {}] regionserver.HRegionServer(1325): Online Regions={87c2f99e9a002bd4dcc44f717be73b0b=TestHBaseWalOnEC,,1731414729365.87c2f99e9a002bd4dcc44f717be73b0b.} 2024-11-12T12:32:10,837 DEBUG [RS_CLOSE_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731414729365.87c2f99e9a002bd4dcc44f717be73b0b. 2024-11-12T12:32:10,837 DEBUG [RS_CLOSE_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731414729365.87c2f99e9a002bd4dcc44f717be73b0b. 
after waiting 0 ms 2024-11-12T12:32:10,838 DEBUG [RS_CLOSE_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731414729365.87c2f99e9a002bd4dcc44f717be73b0b. 2024-11-12T12:32:10,838 INFO [RS:0;4a91c05c96a4:33963 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-12T12:32:10,838 DEBUG [RS:2;4a91c05c96a4:44607 {}] regionserver.HRegionServer(1351): Waiting on 87c2f99e9a002bd4dcc44f717be73b0b 2024-11-12T12:32:10,838 DEBUG [RS:0;4a91c05c96a4:33963 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-12T12:32:10,838 DEBUG [RS:0;4a91c05c96a4:33963 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-12T12:32:10,838 DEBUG [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-12T12:32:10,838 INFO [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-12T12:32:10,838 DEBUG [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-12T12:32:10,838 DEBUG [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T12:32:10,838 DEBUG [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T12:32:10,838 INFO [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-12T12:32:10,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33531 is added to blk_1073741826_1016 (size=93) 2024-11-12T12:32:10,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41173 is added to blk_1073741826_1016 (size=93) 2024-11-12T12:32:10,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44645 is added to blk_1073741826_1016 (size=93) 2024-11-12T12:32:10,847 DEBUG [RS_CLOSE_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/default/TestHBaseWalOnEC/87c2f99e9a002bd4dcc44f717be73b0b/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-12T12:32:10,849 INFO [RS_CLOSE_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731414729365.87c2f99e9a002bd4dcc44f717be73b0b. 
2024-11-12T12:32:10,849 DEBUG [RS_CLOSE_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 87c2f99e9a002bd4dcc44f717be73b0b: Waiting for close lock at 1731414730837Running coprocessor pre-close hooks at 1731414730837Disabling compacts and flushes for region at 1731414730837Disabling writes for close at 1731414730838 (+1 ms)Writing region close event to WAL at 1731414730839 (+1 ms)Running coprocessor post-close hooks at 1731414730848 (+9 ms)Closed at 1731414730849 (+1 ms) 2024-11-12T12:32:10,849 DEBUG [RS:1;4a91c05c96a4:37105 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/oldWALs 2024-11-12T12:32:10,849 DEBUG [RS_CLOSE_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731414729365.87c2f99e9a002bd4dcc44f717be73b0b. 2024-11-12T12:32:10,849 INFO [RS:1;4a91c05c96a4:37105 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 4a91c05c96a4%2C37105%2C1731414726899:(num 1731414728410) 2024-11-12T12:32:10,849 DEBUG [RS:1;4a91c05c96a4:37105 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T12:32:10,849 INFO [RS:1;4a91c05c96a4:37105 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T12:32:10,850 INFO [RS:1;4a91c05c96a4:37105 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T12:32:10,850 INFO [RS:1;4a91c05c96a4:37105 {}] hbase.ChoreService(370): Chore service for: regionserver/4a91c05c96a4:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-12T12:32:10,850 INFO [RS:1;4a91c05c96a4:37105 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T12:32:10,850 INFO [RS:1;4a91c05c96a4:37105 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T12:32:10,850 INFO [regionserver/4a91c05c96a4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-12T12:32:10,850 INFO [RS:1;4a91c05c96a4:37105 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-12T12:32:10,850 INFO [RS:1;4a91c05c96a4:37105 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T12:32:10,850 INFO [RS:1;4a91c05c96a4:37105 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:37105 2024-11-12T12:32:10,851 INFO [regionserver/4a91c05c96a4:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T12:32:10,859 INFO [regionserver/4a91c05c96a4:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T12:32:10,859 INFO [regionserver/4a91c05c96a4:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T12:32:10,861 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37105-0x1012f080e170002, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/4a91c05c96a4,37105,1731414726899 2024-11-12T12:32:10,861 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T12:32:10,861 INFO [RS:1;4a91c05c96a4:37105 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T12:32:10,872 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [4a91c05c96a4,37105,1731414726899] 2024-11-12T12:32:10,878 DEBUG [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/hbase/meta/1588230740/.tmp/info/8bb41ddfe051449f87b864ffb341c215 is 153, key is TestHBaseWalOnEC,,1731414729365.87c2f99e9a002bd4dcc44f717be73b0b./info:regioninfo/1731414729811/Put/seqid=0 2024-11-12T12:32:10,881 WARN [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T12:32:10,881 WARN [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T12:32:10,882 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/4a91c05c96a4,37105,1731414726899 already deleted, retry=false 2024-11-12T12:32:10,882 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 4a91c05c96a4,37105,1731414726899 expired; onlineServers=2 2024-11-12T12:32:10,886 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670188824_22 at /127.0.0.1:44656 [Receiving block BP-1258944945-172.17.0.3-1731414721461:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:44645:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44656 dst: /127.0.0.1:44645 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T12:32:10,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44645 is added to blk_-9223372036854775632_1027 (size=6637) 2024-11-12T12:32:10,891 WARN [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T12:32:10,891 INFO [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/hbase/meta/1588230740/.tmp/info/8bb41ddfe051449f87b864ffb341c215 2024-11-12T12:32:10,916 DEBUG [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/hbase/meta/1588230740/.tmp/ns/fbb33b0a4c844a309e0996f80bd8bde1 is 43, key is default/ns:d/1731414729085/Put/seqid=0 2024-11-12T12:32:10,918 WARN [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T12:32:10,918 WARN [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T12:32:10,922 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670188824_22 at /127.0.0.1:52770 [Receiving block BP-1258944945-172.17.0.3-1731414721461:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:41173:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52770 dst: /127.0.0.1:41173 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T12:32:10,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41173 is added to blk_-9223372036854775616_1029 (size=5153) 2024-11-12T12:32:10,926 WARN [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T12:32:10,926 INFO [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/hbase/meta/1588230740/.tmp/ns/fbb33b0a4c844a309e0996f80bd8bde1 2024-11-12T12:32:10,954 DEBUG [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/hbase/meta/1588230740/.tmp/table/1ba27838700740548acb643a21f34f0f is 52, key is TestHBaseWalOnEC/table:state/1731414729825/Put/seqid=0 2024-11-12T12:32:10,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44645 is added to blk_-9223372036854775756_1006 (size=196) 2024-11-12T12:32:10,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33531 is added to blk_-9223372036854775709_1013 (size=1321) 2024-11-12T12:32:10,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41173 is added to blk_-9223372036854775708_1013 (size=1321) 2024-11-12T12:32:10,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33531 is added to blk_-9223372036854775693_1015 (size=32) 2024-11-12T12:32:10,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41173 is added to blk_-9223372036854775757_1006 (size=196) 2024-11-12T12:32:10,957 WARN [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T12:32:10,957 WARN [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T12:32:10,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33531 is added to blk_-9223372036854775772_1004 (size=42) 2024-11-12T12:32:10,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41173 is added to blk_-9223372036854775773_1004 (size=42) 2024-11-12T12:32:10,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41173 is added to blk_-9223372036854775692_1015 (size=32) 2024-11-12T12:32:10,962 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670188824_22 at /127.0.0.1:40202 [Receiving block BP-1258944945-172.17.0.3-1731414721461:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:33531:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40202 dst: /127.0.0.1:33531 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T12:32:10,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33531 is added to blk_-9223372036854775600_1031 (size=5249) 2024-11-12T12:32:10,966 WARN [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
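Note on the DFSStripedOutputStream warnings repeated above: the test data directories use the RS-3-2-1024k erasure coding policy, i.e. Reed-Solomon with 3 data cells plus 2 parity cells per block group (1 MiB striping cells), which needs at least 5 datanodes to place every cell. This minicluster runs only 3 datanodes, so the parity cells at indexes 3 and 4 cannot be allocated and each block group is written with reduced redundancy, which is what the "failed to write 2 blocks. It's at high risk of losing data" warnings report. Besides the 'hdfs ec -verifyClusterSetup' command the warning itself suggests, a client-side check can compare the policy's width with the live datanode count; a minimal sketch, assuming fs.defaultFS points at this cluster and using an illustrative path (EcWidthCheck and /hbase are not taken from the test code):

    // Illustrative check of EC policy width vs. live datanodes.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class EcWidthCheck {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();   // assumes fs.defaultFS points at the cluster
            try (DistributedFileSystem dfs =
                     (DistributedFileSystem) new Path("/").getFileSystem(conf)) {
                Path dir = new Path("/hbase");                                // illustrative path
                ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir); // null means replication
                int liveDatanodes = dfs.getDataNodeStats().length;
                if (policy == null) {
                    System.out.println(dir + " is not erasure coded");
                    return;
                }
                int width = policy.getNumDataUnits() + policy.getNumParityUnits(); // RS-3-2 -> 3 + 2 = 5
                System.out.println("policy=" + policy.getName()
                    + " width=" + width + " liveDatanodes=" + liveDatanodes);
                if (liveDatanodes < width) {
                    System.out.println("Too few datanodes to place every data and parity cell.");
                }
            }
        }
    }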
2024-11-12T12:32:10,966 INFO [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/hbase/meta/1588230740/.tmp/table/1ba27838700740548acb643a21f34f0f 2024-11-12T12:32:10,972 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37105-0x1012f080e170002, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T12:32:10,972 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37105-0x1012f080e170002, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T12:32:10,972 INFO [RS:1;4a91c05c96a4:37105 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T12:32:10,972 INFO [RS:1;4a91c05c96a4:37105 {}] regionserver.HRegionServer(1031): Exiting; stopping=4a91c05c96a4,37105,1731414726899; zookeeper connection closed. 2024-11-12T12:32:10,973 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@c4e5c61 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@c4e5c61 2024-11-12T12:32:10,976 DEBUG [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/hbase/meta/1588230740/.tmp/info/8bb41ddfe051449f87b864ffb341c215 as hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/hbase/meta/1588230740/info/8bb41ddfe051449f87b864ffb341c215 2024-11-12T12:32:10,985 INFO [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/hbase/meta/1588230740/info/8bb41ddfe051449f87b864ffb341c215, entries=10, sequenceid=11, filesize=6.5 K 2024-11-12T12:32:10,987 DEBUG [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/hbase/meta/1588230740/.tmp/ns/fbb33b0a4c844a309e0996f80bd8bde1 as hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/hbase/meta/1588230740/ns/fbb33b0a4c844a309e0996f80bd8bde1 2024-11-12T12:32:10,996 INFO [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/hbase/meta/1588230740/ns/fbb33b0a4c844a309e0996f80bd8bde1, entries=2, sequenceid=11, filesize=5.0 K 2024-11-12T12:32:10,997 DEBUG [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/hbase/meta/1588230740/.tmp/table/1ba27838700740548acb643a21f34f0f as hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/hbase/meta/1588230740/table/1ba27838700740548acb643a21f34f0f 2024-11-12T12:32:11,006 INFO [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/hbase/meta/1588230740/table/1ba27838700740548acb643a21f34f0f, entries=2, sequenceid=11, filesize=5.1 K 2024-11-12T12:32:11,008 INFO [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 169ms, sequenceid=11, compaction requested=false 2024-11-12T12:32:11,008 DEBUG [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-12T12:32:11,015 DEBUG [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-12T12:32:11,016 DEBUG [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-12T12:32:11,016 INFO [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-12T12:32:11,017 DEBUG [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731414730838Running coprocessor pre-close hooks at 1731414730838Disabling compacts and flushes for region at 1731414730838Disabling writes for close at 1731414730838Obtaining lock to block concurrent updates at 1731414730838Preparing flush snapshotting stores in 1588230740 at 1731414730838Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731414730839 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731414730840 (+1 ms)Flushing 1588230740/info: creating writer at 1731414730840Flushing 1588230740/info: appending metadata at 1731414730871 (+31 ms)Flushing 1588230740/info: closing flushed file at 1731414730871Flushing 1588230740/ns: creating writer at 1731414730900 (+29 ms)Flushing 1588230740/ns: appending metadata at 1731414730915 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731414730915Flushing 1588230740/table: creating writer at 1731414730935 (+20 ms)Flushing 1588230740/table: appending metadata at 1731414730953 (+18 ms)Flushing 1588230740/table: closing flushed file at 1731414730953Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@12c8bb83: reopening flushed file at 1731414730975 (+22 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3596e4c9: reopening flushed file at 1731414730986 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4c048de5: reopening flushed file at 1731414730996 (+10 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 169ms, sequenceid=11, compaction requested=false at 1731414731008 (+12 ms)Writing region close event to WAL at 1731414731010 (+2 ms)Running coprocessor post-close hooks at 1731414731016 (+6 ms)Closed at 1731414731016 2024-11-12T12:32:11,017 DEBUG [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed 
hbase:meta,,1.1588230740 2024-11-12T12:32:11,038 INFO [RS:2;4a91c05c96a4:44607 {}] regionserver.HRegionServer(976): stopping server 4a91c05c96a4,44607,1731414726947; all regions closed. 2024-11-12T12:32:11,038 INFO [RS:0;4a91c05c96a4:33963 {}] regionserver.HRegionServer(976): stopping server 4a91c05c96a4,33963,1731414726800; all regions closed. 2024-11-12T12:32:11,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44645 is added to blk_1073741829_1019 (size=2751) 2024-11-12T12:32:11,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44645 is added to blk_1073741828_1018 (size=1298) 2024-11-12T12:32:11,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41173 is added to blk_1073741829_1019 (size=2751) 2024-11-12T12:32:11,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41173 is added to blk_1073741828_1018 (size=1298) 2024-11-12T12:32:11,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33531 is added to blk_1073741829_1019 (size=2751) 2024-11-12T12:32:11,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33531 is added to blk_1073741828_1018 (size=1298) 2024-11-12T12:32:11,047 DEBUG [RS:2;4a91c05c96a4:44607 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/oldWALs 2024-11-12T12:32:11,047 INFO [RS:2;4a91c05c96a4:44607 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 4a91c05c96a4%2C44607%2C1731414726947:(num 1731414728411) 2024-11-12T12:32:11,047 DEBUG [RS:2;4a91c05c96a4:44607 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T12:32:11,047 INFO [RS:2;4a91c05c96a4:44607 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T12:32:11,047 DEBUG [RS:0;4a91c05c96a4:33963 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/oldWALs 2024-11-12T12:32:11,048 INFO [RS:0;4a91c05c96a4:33963 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 4a91c05c96a4%2C33963%2C1731414726800.meta:.meta(num 1731414728932) 2024-11-12T12:32:11,048 INFO [RS:2;4a91c05c96a4:44607 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T12:32:11,048 INFO [RS:2;4a91c05c96a4:44607 {}] hbase.ChoreService(370): Chore service for: regionserver/4a91c05c96a4:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-12T12:32:11,048 INFO [RS:2;4a91c05c96a4:44607 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T12:32:11,048 INFO [RS:2;4a91c05c96a4:44607 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T12:32:11,048 INFO [regionserver/4a91c05c96a4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-12T12:32:11,048 INFO [RS:2;4a91c05c96a4:44607 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-12T12:32:11,048 INFO [RS:2;4a91c05c96a4:44607 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T12:32:11,048 INFO [RS:2;4a91c05c96a4:44607 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:44607 2024-11-12T12:32:11,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33531 is added to blk_1073741827_1017 (size=93) 2024-11-12T12:32:11,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44645 is added to blk_1073741827_1017 (size=93) 2024-11-12T12:32:11,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41173 is added to blk_1073741827_1017 (size=93) 2024-11-12T12:32:11,054 DEBUG [RS:0;4a91c05c96a4:33963 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/oldWALs 2024-11-12T12:32:11,054 INFO [RS:0;4a91c05c96a4:33963 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 4a91c05c96a4%2C33963%2C1731414726800:(num 1731414728411) 2024-11-12T12:32:11,054 DEBUG [RS:0;4a91c05c96a4:33963 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T12:32:11,054 INFO [RS:0;4a91c05c96a4:33963 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T12:32:11,054 INFO [RS:0;4a91c05c96a4:33963 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T12:32:11,054 INFO [RS:0;4a91c05c96a4:33963 {}] hbase.ChoreService(370): Chore service for: regionserver/4a91c05c96a4:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-12T12:32:11,055 INFO [RS:0;4a91c05c96a4:33963 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T12:32:11,055 INFO [regionserver/4a91c05c96a4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-12T12:32:11,055 INFO [RS:0;4a91c05c96a4:33963 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:33963 2024-11-12T12:32:11,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x1012f080e170003, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/4a91c05c96a4,44607,1731414726947 2024-11-12T12:32:11,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T12:32:11,061 INFO [RS:2;4a91c05c96a4:44607 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T12:32:11,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33963-0x1012f080e170001, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/4a91c05c96a4,33963,1731414726800 2024-11-12T12:32:11,071 INFO [RS:0;4a91c05c96a4:33963 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T12:32:11,071 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [4a91c05c96a4,44607,1731414726947] 2024-11-12T12:32:11,187 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/4a91c05c96a4,44607,1731414726947 already deleted, retry=false 2024-11-12T12:32:11,187 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 4a91c05c96a4,44607,1731414726947 expired; onlineServers=1 2024-11-12T12:32:11,187 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [4a91c05c96a4,33963,1731414726800] 2024-11-12T12:32:11,198 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/4a91c05c96a4,33963,1731414726800 already deleted, retry=false 2024-11-12T12:32:11,198 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 4a91c05c96a4,33963,1731414726800 expired; onlineServers=0 2024-11-12T12:32:11,198 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '4a91c05c96a4,36145,1731414726033' ***** 2024-11-12T12:32:11,199 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-12T12:32:11,199 INFO [M:0;4a91c05c96a4:36145 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T12:32:11,199 INFO [M:0;4a91c05c96a4:36145 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T12:32:11,199 DEBUG [M:0;4a91c05c96a4:36145 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-12T12:32:11,200 DEBUG [M:0;4a91c05c96a4:36145 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-12T12:32:11,200 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
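Note on the NodeDeleted events and "RegionServer ephemeral node deleted, processing expiration" records above: each regionserver registers an ephemeral znode under /hbase/rs, so when its ZooKeeper session closes the znode disappears, the master's watch on /hbase/rs fires, and RegionServerTracker/ServerManager treat the server as expired. A minimal sketch of that mechanism with the plain ZooKeeper client (the connect string and the /demo-rs path are made up for the sketch, not the test's actual values):

    // Illustrative ephemeral-znode liveness pattern with the plain ZooKeeper client.
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralLiveness {
        // Connect and block until the session reaches SyncConnected.
        static ZooKeeper connect(String connectString) throws Exception {
            CountDownLatch connected = new CountDownLatch(1);
            ZooKeeper zk = new ZooKeeper(connectString, 15000, event -> {
                System.out.println("event=" + event.getType() + " path=" + event.getPath());
                if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                    connected.countDown();
                }
            });
            connected.await();
            return zk;
        }

        public static void main(String[] args) throws Exception {
            ZooKeeper server = connect("127.0.0.1:2181");   // stands in for a regionserver
            ZooKeeper master = connect("127.0.0.1:2181");   // stands in for the master

            // Parent znode is persistent, the per-server znode is ephemeral.
            if (server.exists("/demo-rs", false) == null) {
                server.create("/demo-rs", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
            }
            server.create("/demo-rs/host1,16020", new byte[0],
                ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

            // The "master" watches the parent; closing the server session deletes the
            // ephemeral child and fires NodeChildrenChanged on this watch.
            System.out.println("registered: " + master.getChildren("/demo-rs", true));
            server.close();
            Thread.sleep(1000);   // give the watch event time to arrive before exiting
            master.close();
        }
    }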
2024-11-12T12:32:11,200 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster-HFileCleaner.small.0-1731414728077 {}] cleaner.HFileCleaner(306): Exit Thread[master/4a91c05c96a4:0:becomeActiveMaster-HFileCleaner.small.0-1731414728077,5,FailOnTimeoutGroup] 2024-11-12T12:32:11,200 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster-HFileCleaner.large.0-1731414728075 {}] cleaner.HFileCleaner(306): Exit Thread[master/4a91c05c96a4:0:becomeActiveMaster-HFileCleaner.large.0-1731414728075,5,FailOnTimeoutGroup] 2024-11-12T12:32:11,201 INFO [M:0;4a91c05c96a4:36145 {}] hbase.ChoreService(370): Chore service for: master/4a91c05c96a4:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-12T12:32:11,201 INFO [M:0;4a91c05c96a4:36145 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T12:32:11,202 DEBUG [M:0;4a91c05c96a4:36145 {}] master.HMaster(1795): Stopping service threads 2024-11-12T12:32:11,202 INFO [M:0;4a91c05c96a4:36145 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-12T12:32:11,202 INFO [M:0;4a91c05c96a4:36145 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T12:32:11,204 INFO [M:0;4a91c05c96a4:36145 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-12T12:32:11,204 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-12T12:32:11,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-12T12:32:11,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:11,208 DEBUG [M:0;4a91c05c96a4:36145 {}] zookeeper.ZKUtil(347): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-12T12:32:11,208 WARN [M:0;4a91c05c96a4:36145 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-12T12:32:11,209 INFO [M:0;4a91c05c96a4:36145 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/.lastflushedseqids 2024-11-12T12:32:11,220 WARN [M:0;4a91c05c96a4:36145 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T12:32:11,221 WARN [M:0;4a91c05c96a4:36145 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-12T12:32:11,223 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1228856340_22 at /127.0.0.1:52842 [Receiving block BP-1258944945-172.17.0.3-1731414721461:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:41173:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52842 dst: /127.0.0.1:41173
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T12:32:11,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41173 is added to blk_-9223372036854775584_1033 (size=127)
2024-11-12T12:32:11,228 WARN [M:0;4a91c05c96a4:36145 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-12T12:32:11,228 INFO [M:0;4a91c05c96a4:36145 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-11-12T12:32:11,228 INFO [M:0;4a91c05c96a4:36145 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-12T12:32:11,228 DEBUG [M:0;4a91c05c96a4:36145 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-12T12:32:11,228 INFO [M:0;4a91c05c96a4:36145 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-12T12:32:11,228 DEBUG [M:0;4a91c05c96a4:36145 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-12T12:32:11,228 DEBUG [M:0;4a91c05c96a4:36145 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-12T12:32:11,228 DEBUG [M:0;4a91c05c96a4:36145 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-12T12:32:11,229 INFO [M:0;4a91c05c96a4:36145 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-11-12T12:32:11,247 DEBUG [M:0;4a91c05c96a4:36145 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a1303d51f3074f988d99128af841e766 is 82, key is hbase:meta,,1/info:regioninfo/1731414729016/Put/seqid=0 2024-11-12T12:32:11,249 WARN [M:0;4a91c05c96a4:36145 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T12:32:11,249 WARN [M:0;4a91c05c96a4:36145 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T12:32:11,251 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1228856340_22 at /127.0.0.1:52852 [Receiving block BP-1258944945-172.17.0.3-1731414721461:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:41173:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52852 dst: /127.0.0.1:41173 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T12:32:11,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41173 is added to blk_-9223372036854775568_1035 (size=5672) 2024-11-12T12:32:11,255 WARN [M:0;4a91c05c96a4:36145 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-12T12:32:11,256 INFO [M:0;4a91c05c96a4:36145 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a1303d51f3074f988d99128af841e766 2024-11-12T12:32:11,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x1012f080e170003, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T12:32:11,260 INFO [RS:2;4a91c05c96a4:44607 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T12:32:11,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x1012f080e170003, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T12:32:11,260 INFO [RS:2;4a91c05c96a4:44607 {}] regionserver.HRegionServer(1031): Exiting; stopping=4a91c05c96a4,44607,1731414726947; zookeeper connection closed. 2024-11-12T12:32:11,260 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@26c17838 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@26c17838 2024-11-12T12:32:11,278 DEBUG [M:0;4a91c05c96a4:36145 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f643cf02dc9f45e2a044a2c7c606962a is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731414729831/Put/seqid=0 2024-11-12T12:32:11,280 WARN [M:0;4a91c05c96a4:36145 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T12:32:11,280 WARN [M:0;4a91c05c96a4:36145 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T12:32:11,283 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1228856340_22 at /127.0.0.1:52872 [Receiving block BP-1258944945-172.17.0.3-1731414721461:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:41173:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52872 dst: /127.0.0.1:41173 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T12:32:11,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41173 is added to blk_-9223372036854775552_1037 (size=6440) 2024-11-12T12:32:11,287 WARN [M:0;4a91c05c96a4:36145 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T12:32:11,287 INFO [M:0;4a91c05c96a4:36145 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.16 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f643cf02dc9f45e2a044a2c7c606962a 2024-11-12T12:32:11,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33963-0x1012f080e170001, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T12:32:11,287 INFO [RS:0;4a91c05c96a4:33963 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T12:32:11,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33963-0x1012f080e170001, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T12:32:11,287 INFO [RS:0;4a91c05c96a4:33963 {}] regionserver.HRegionServer(1031): Exiting; stopping=4a91c05c96a4,33963,1731414726800; zookeeper connection closed. 2024-11-12T12:32:11,288 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@650e4dd3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@650e4dd3 2024-11-12T12:32:11,288 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-12T12:32:11,311 DEBUG [M:0;4a91c05c96a4:36145 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/05035ce3f10e4875bbe50cfb9309d55a is 69, key is 4a91c05c96a4,33963,1731414726800/rs:state/1731414728119/Put/seqid=0 2024-11-12T12:32:11,313 WARN [M:0;4a91c05c96a4:36145 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T12:32:11,313 WARN [M:0;4a91c05c96a4:36145 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-12T12:32:11,315 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1228856340_22 at /127.0.0.1:52876 [Receiving block BP-1258944945-172.17.0.3-1731414721461:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:41173:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52876 dst: /127.0.0.1:41173
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T12:32:11,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41173 is added to blk_-9223372036854775536_1039 (size=5294)
2024-11-12T12:32:11,320 WARN [M:0;4a91c05c96a4:36145 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-12T12:32:11,320 INFO [M:0;4a91c05c96a4:36145 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/05035ce3f10e4875bbe50cfb9309d55a 2024-11-12T12:32:11,328 DEBUG [M:0;4a91c05c96a4:36145 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a1303d51f3074f988d99128af841e766 as hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a1303d51f3074f988d99128af841e766 2024-11-12T12:32:11,335 INFO [M:0;4a91c05c96a4:36145 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a1303d51f3074f988d99128af841e766, entries=8, sequenceid=72, filesize=5.5 K 2024-11-12T12:32:11,337 DEBUG [M:0;4a91c05c96a4:36145 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f643cf02dc9f45e2a044a2c7c606962a as hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f643cf02dc9f45e2a044a2c7c606962a 2024-11-12T12:32:11,345 INFO [M:0;4a91c05c96a4:36145 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f643cf02dc9f45e2a044a2c7c606962a, entries=8, sequenceid=72, filesize=6.3 K 2024-11-12T12:32:11,347 DEBUG [M:0;4a91c05c96a4:36145 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/05035ce3f10e4875bbe50cfb9309d55a as hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/05035ce3f10e4875bbe50cfb9309d55a 2024-11-12T12:32:11,354 INFO [M:0;4a91c05c96a4:36145 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/05035ce3f10e4875bbe50cfb9309d55a, entries=3, sequenceid=72, filesize=5.2 K 2024-11-12T12:32:11,355 INFO [M:0;4a91c05c96a4:36145 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=72, compaction requested=false 2024-11-12T12:32:11,357 INFO [M:0;4a91c05c96a4:36145 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
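Note on the "Committing .../.tmp/... as ..." DEBUG lines above: each flush writes its new store file under the region's .tmp directory and only then moves it into the column-family directory, so a reader only ever sees a fully written file. The same write-temp-then-rename idiom, reduced to the plain Hadoop FileSystem API (paths and file names are hypothetical, not HBase internals):

    // Illustrative write-to-temp-then-rename commit, the same shape as the
    // "Committing .tmp/... as ..." lines above; paths are made up for the sketch.
    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TempThenRenameCommit {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();          // assumes fs.defaultFS is configured
            FileSystem fs = FileSystem.get(conf);

            Path tmp = new Path("/demo/region/.tmp/info/flush-0001");
            Path committed = new Path("/demo/region/info/flush-0001");

            // 1) Write the whole file under .tmp; nothing reads from there.
            try (FSDataOutputStream out = fs.create(tmp, true)) {
                out.writeBytes("flushed-cells-placeholder");   // stand-in for the real HFile bytes
            }

            // 2) Publish it with a single rename; readers only ever see a complete file.
            fs.mkdirs(committed.getParent());
            if (!fs.rename(tmp, committed)) {
                throw new IOException("failed to commit " + tmp + " as " + committed);
            }
        }
    }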
2024-11-12T12:32:11,357 DEBUG [M:0;4a91c05c96a4:36145 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731414731228Disabling compacts and flushes for region at 1731414731228Disabling writes for close at 1731414731228Obtaining lock to block concurrent updates at 1731414731229 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731414731229Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27483, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1731414731229Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731414731230 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731414731230Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731414731246 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731414731246Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731414731263 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731414731278 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731414731278Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731414731296 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731414731310 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731414731310Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@127b425: reopening flushed file at 1731414731327 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@384eb1fd: reopening flushed file at 1731414731336 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@178ba908: reopening flushed file at 1731414731345 (+9 ms)Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=72, compaction requested=false at 1731414731355 (+10 ms)Writing region close event to WAL at 1731414731357 (+2 ms)Closed at 1731414731357 2024-11-12T12:32:11,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44645 is added to blk_1073741825_1011 (size=32686) 2024-11-12T12:32:11,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33531 is added to blk_1073741825_1011 (size=32686) 2024-11-12T12:32:11,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41173 is added to blk_1073741825_1011 (size=32686) 2024-11-12T12:32:11,361 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-12T12:32:11,361 INFO [M:0;4a91c05c96a4:36145 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-12T12:32:11,361 INFO [M:0;4a91c05c96a4:36145 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:36145 2024-11-12T12:32:11,362 INFO [M:0;4a91c05c96a4:36145 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T12:32:11,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T12:32:11,474 INFO [M:0;4a91c05c96a4:36145 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T12:32:11,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1012f080e170000, quorum=127.0.0.1:54297, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T12:32:11,513 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e59159d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T12:32:11,515 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T12:32:11,515 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T12:32:11,515 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T12:32:11,515 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/hadoop.log.dir/,STOPPED} 2024-11-12T12:32:11,517 WARN [BP-1258944945-172.17.0.3-1731414721461 heartbeating to localhost/127.0.0.1:32985 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T12:32:11,517 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-12T12:32:11,517 WARN [BP-1258944945-172.17.0.3-1731414721461 heartbeating to localhost/127.0.0.1:32985 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1258944945-172.17.0.3-1731414721461 (Datanode Uuid 9581ae8b-9bbb-4d95-913e-b822260e9dd1) service to localhost/127.0.0.1:32985 2024-11-12T12:32:11,517 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T12:32:11,518 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/cluster_af54d12f-3eca-f27a-b969-e30bd9199a8b/data/data5/current/BP-1258944945-172.17.0.3-1731414721461 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T12:32:11,519 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/cluster_af54d12f-3eca-f27a-b969-e30bd9199a8b/data/data6/current/BP-1258944945-172.17.0.3-1731414721461 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T12:32:11,519 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T12:32:11,521 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c6b8f01{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T12:32:11,521 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T12:32:11,521 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T12:32:11,521 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T12:32:11,521 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/hadoop.log.dir/,STOPPED} 2024-11-12T12:32:11,523 WARN [BP-1258944945-172.17.0.3-1731414721461 heartbeating to localhost/127.0.0.1:32985 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T12:32:11,523 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-12T12:32:11,523 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T12:32:11,523 WARN [BP-1258944945-172.17.0.3-1731414721461 heartbeating to localhost/127.0.0.1:32985 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1258944945-172.17.0.3-1731414721461 (Datanode Uuid 8b4d2a64-da58-474e-a63c-790ee6a25c03) service to localhost/127.0.0.1:32985 2024-11-12T12:32:11,523 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/cluster_af54d12f-3eca-f27a-b969-e30bd9199a8b/data/data3/current/BP-1258944945-172.17.0.3-1731414721461 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T12:32:11,523 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/cluster_af54d12f-3eca-f27a-b969-e30bd9199a8b/data/data4/current/BP-1258944945-172.17.0.3-1731414721461 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T12:32:11,524 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T12:32:11,526 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4839957b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T12:32:11,526 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T12:32:11,526 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T12:32:11,526 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T12:32:11,526 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/hadoop.log.dir/,STOPPED} 2024-11-12T12:32:11,527 WARN [BP-1258944945-172.17.0.3-1731414721461 heartbeating to localhost/127.0.0.1:32985 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T12:32:11,527 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-12T12:32:11,527 WARN [BP-1258944945-172.17.0.3-1731414721461 heartbeating to localhost/127.0.0.1:32985 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1258944945-172.17.0.3-1731414721461 (Datanode Uuid 10d2f4c6-b2c6-4e36-b701-3a37275b048d) service to localhost/127.0.0.1:32985 2024-11-12T12:32:11,527 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T12:32:11,528 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/cluster_af54d12f-3eca-f27a-b969-e30bd9199a8b/data/data1/current/BP-1258944945-172.17.0.3-1731414721461 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T12:32:11,528 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/cluster_af54d12f-3eca-f27a-b969-e30bd9199a8b/data/data2/current/BP-1258944945-172.17.0.3-1731414721461 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T12:32:11,528 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T12:32:11,536 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e4c45c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-12T12:32:11,536 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T12:32:11,536 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T12:32:11,537 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T12:32:11,537 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/hadoop.log.dir/,STOPPED} 2024-11-12T12:32:11,545 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-12T12:32:11,571 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-12T12:32:11,577 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=86 (was 155), OpenFileDescriptor=439 (was 393) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=233 (was 254), ProcessCount=11 (was 11), AvailableMemoryMB=7508 (was 7812) 2024-11-12T12:32:11,583 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=86, OpenFileDescriptor=439, MaxFileDescriptor=1048576, SystemLoadAverage=233, ProcessCount=11, AvailableMemoryMB=7508 2024-11-12T12:32:11,583 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-12T12:32:11,583 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/hadoop.log.dir so I do NOT create it in target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667 2024-11-12T12:32:11,583 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d410aea8-24b4-1a40-926d-4c8822d1b29d/hadoop.tmp.dir so I do NOT create it in target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667 2024-11-12T12:32:11,583 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/cluster_6ab58451-9178-2f10-797f-1a1f3d6a99ca, deleteOnExit=true 2024-11-12T12:32:11,583 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-12T12:32:11,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/test.cache.data in system properties and HBase conf 2024-11-12T12:32:11,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/hadoop.tmp.dir in system properties and HBase conf 2024-11-12T12:32:11,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/hadoop.log.dir in system properties and HBase conf 2024-11-12T12:32:11,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-12T12:32:11,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-12T12:32:11,585 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-12T12:32:11,585 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-12T12:32:11,585 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-12T12:32:11,585 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-12T12:32:11,585 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-12T12:32:11,585 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T12:32:11,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-12T12:32:11,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-12T12:32:11,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T12:32:11,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T12:32:11,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-12T12:32:11,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/nfs.dump.dir in system properties and HBase conf 2024-11-12T12:32:11,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/java.io.tmpdir in system properties and HBase conf 2024-11-12T12:32:11,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T12:32:11,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-12T12:32:11,587 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-12T12:32:11,913 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T12:32:11,919 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T12:32:11,920 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T12:32:11,920 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T12:32:11,920 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-12T12:32:11,921 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T12:32:11,922 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a49b909{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/hadoop.log.dir/,AVAILABLE} 2024-11-12T12:32:11,922 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56aa9d3b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T12:32:12,016 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7aaeb6cf{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/java.io.tmpdir/jetty-localhost-40889-hadoop-hdfs-3_4_1-tests_jar-_-any-14272739783847275804/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-12T12:32:12,017 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@686c9dd5{HTTP/1.1, (http/1.1)}{localhost:40889} 2024-11-12T12:32:12,017 INFO [Time-limited test {}] server.Server(415): Started @12311ms 2024-11-12T12:32:12,306 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T12:32:12,310 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T12:32:12,311 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T12:32:12,311 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T12:32:12,311 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T12:32:12,311 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61a92fea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/hadoop.log.dir/,AVAILABLE} 2024-11-12T12:32:12,312 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@137179d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T12:32:12,405 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@582dea15{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/java.io.tmpdir/jetty-localhost-38335-hadoop-hdfs-3_4_1-tests_jar-_-any-8151637272166850548/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T12:32:12,405 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5f6d3ff7{HTTP/1.1, (http/1.1)}{localhost:38335} 2024-11-12T12:32:12,405 INFO [Time-limited test {}] server.Server(415): Started @12700ms 2024-11-12T12:32:12,407 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T12:32:12,434 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T12:32:12,437 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T12:32:12,438 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T12:32:12,438 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T12:32:12,438 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-12T12:32:12,439 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@bb1336{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/hadoop.log.dir/,AVAILABLE} 2024-11-12T12:32:12,439 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c597470{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T12:32:12,530 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4b340784{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/java.io.tmpdir/jetty-localhost-40411-hadoop-hdfs-3_4_1-tests_jar-_-any-5842194909946768036/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T12:32:12,531 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1872922a{HTTP/1.1, (http/1.1)}{localhost:40411} 2024-11-12T12:32:12,531 INFO [Time-limited test {}] server.Server(415): Started @12825ms 2024-11-12T12:32:12,532 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T12:32:12,564 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T12:32:12,567 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T12:32:12,568 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T12:32:12,568 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T12:32:12,568 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-12T12:32:12,569 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@38da8210{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/hadoop.log.dir/,AVAILABLE} 2024-11-12T12:32:12,569 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@73f6422f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T12:32:12,663 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1183a3bb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/java.io.tmpdir/jetty-localhost-33757-hadoop-hdfs-3_4_1-tests_jar-_-any-14363879914674893622/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T12:32:12,663 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6ee4ec12{HTTP/1.1, (http/1.1)}{localhost:33757} 2024-11-12T12:32:12,663 INFO [Time-limited test {}] server.Server(415): Started @12958ms 2024-11-12T12:32:12,665 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T12:32:13,836 WARN [Thread-560 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/cluster_6ab58451-9178-2f10-797f-1a1f3d6a99ca/data/data1/current/BP-1840095573-172.17.0.3-1731414731611/current, will proceed with Du for space computation calculation, 2024-11-12T12:32:13,837 WARN [Thread-561 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/cluster_6ab58451-9178-2f10-797f-1a1f3d6a99ca/data/data2/current/BP-1840095573-172.17.0.3-1731414731611/current, will proceed with Du for space computation calculation, 2024-11-12T12:32:13,852 WARN [Thread-500 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-12T12:32:13,855 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7099bc0aff088f9b with lease ID 0x2ee33e0ddba97676: Processing first storage report for DS-c6907340-b83d-41fe-83ad-c70d5a19fe30 from datanode DatanodeRegistration(127.0.0.1:35527, datanodeUuid=ff78627a-2999-4b7d-97b2-7ae4df96539f, infoPort=40051, infoSecurePort=0, ipcPort=38741, storageInfo=lv=-57;cid=testClusterID;nsid=2077384612;c=1731414731611) 2024-11-12T12:32:13,855 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7099bc0aff088f9b with lease ID 0x2ee33e0ddba97676: from storage DS-c6907340-b83d-41fe-83ad-c70d5a19fe30 node DatanodeRegistration(127.0.0.1:35527, datanodeUuid=ff78627a-2999-4b7d-97b2-7ae4df96539f, infoPort=40051, infoSecurePort=0, ipcPort=38741, storageInfo=lv=-57;cid=testClusterID;nsid=2077384612;c=1731414731611), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T12:32:13,855 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7099bc0aff088f9b with lease ID 0x2ee33e0ddba97676: Processing first storage report for DS-5d3558cd-8a99-4c33-aa83-a02358fb1e20 from datanode DatanodeRegistration(127.0.0.1:35527, datanodeUuid=ff78627a-2999-4b7d-97b2-7ae4df96539f, infoPort=40051, infoSecurePort=0, ipcPort=38741, storageInfo=lv=-57;cid=testClusterID;nsid=2077384612;c=1731414731611) 2024-11-12T12:32:13,855 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7099bc0aff088f9b with lease ID 0x2ee33e0ddba97676: from storage DS-5d3558cd-8a99-4c33-aa83-a02358fb1e20 node DatanodeRegistration(127.0.0.1:35527, datanodeUuid=ff78627a-2999-4b7d-97b2-7ae4df96539f, infoPort=40051, infoSecurePort=0, ipcPort=38741, storageInfo=lv=-57;cid=testClusterID;nsid=2077384612;c=1731414731611), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T12:32:14,022 WARN [Thread-571 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/cluster_6ab58451-9178-2f10-797f-1a1f3d6a99ca/data/data3/current/BP-1840095573-172.17.0.3-1731414731611/current, will proceed with Du for space computation calculation, 2024-11-12T12:32:14,022 WARN [Thread-572 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/cluster_6ab58451-9178-2f10-797f-1a1f3d6a99ca/data/data4/current/BP-1840095573-172.17.0.3-1731414731611/current, will proceed with Du for space computation calculation, 2024-11-12T12:32:14,044 WARN [Thread-523 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-12T12:32:14,047 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x140823caf2d0ba35 with lease ID 0x2ee33e0ddba97677: Processing first storage report for DS-9ed1ea3c-38f8-487e-908e-20a08cadd9c7 from datanode DatanodeRegistration(127.0.0.1:45189, datanodeUuid=db7993f6-04c7-4193-b13a-6c4efd097d55, infoPort=38405, infoSecurePort=0, ipcPort=40513, storageInfo=lv=-57;cid=testClusterID;nsid=2077384612;c=1731414731611) 2024-11-12T12:32:14,047 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x140823caf2d0ba35 with lease ID 0x2ee33e0ddba97677: from storage DS-9ed1ea3c-38f8-487e-908e-20a08cadd9c7 node DatanodeRegistration(127.0.0.1:45189, datanodeUuid=db7993f6-04c7-4193-b13a-6c4efd097d55, infoPort=38405, infoSecurePort=0, ipcPort=40513, storageInfo=lv=-57;cid=testClusterID;nsid=2077384612;c=1731414731611), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T12:32:14,047 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x140823caf2d0ba35 with lease ID 0x2ee33e0ddba97677: Processing first storage report for DS-b4631547-df21-47ab-b089-138861477e93 from datanode DatanodeRegistration(127.0.0.1:45189, datanodeUuid=db7993f6-04c7-4193-b13a-6c4efd097d55, infoPort=38405, infoSecurePort=0, ipcPort=40513, storageInfo=lv=-57;cid=testClusterID;nsid=2077384612;c=1731414731611) 2024-11-12T12:32:14,047 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x140823caf2d0ba35 with lease ID 0x2ee33e0ddba97677: from storage DS-b4631547-df21-47ab-b089-138861477e93 node DatanodeRegistration(127.0.0.1:45189, datanodeUuid=db7993f6-04c7-4193-b13a-6c4efd097d55, infoPort=38405, infoSecurePort=0, ipcPort=40513, storageInfo=lv=-57;cid=testClusterID;nsid=2077384612;c=1731414731611), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T12:32:14,076 WARN [Thread-582 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/cluster_6ab58451-9178-2f10-797f-1a1f3d6a99ca/data/data5/current/BP-1840095573-172.17.0.3-1731414731611/current, will proceed with Du for space computation calculation, 2024-11-12T12:32:14,076 WARN [Thread-583 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/cluster_6ab58451-9178-2f10-797f-1a1f3d6a99ca/data/data6/current/BP-1840095573-172.17.0.3-1731414731611/current, will proceed with Du for space computation calculation, 2024-11-12T12:32:14,100 WARN [Thread-545 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-12T12:32:14,103 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd98f27b5eea103f4 with lease ID 0x2ee33e0ddba97678: Processing first storage report for DS-126b66a5-e61a-4ab4-b3f7-398cebb998cc from datanode DatanodeRegistration(127.0.0.1:40985, datanodeUuid=5865a504-e101-426b-97a0-abe47a44bcb7, infoPort=35819, infoSecurePort=0, ipcPort=36731, storageInfo=lv=-57;cid=testClusterID;nsid=2077384612;c=1731414731611) 2024-11-12T12:32:14,103 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd98f27b5eea103f4 with lease ID 0x2ee33e0ddba97678: from storage DS-126b66a5-e61a-4ab4-b3f7-398cebb998cc node DatanodeRegistration(127.0.0.1:40985, datanodeUuid=5865a504-e101-426b-97a0-abe47a44bcb7, infoPort=35819, infoSecurePort=0, ipcPort=36731, storageInfo=lv=-57;cid=testClusterID;nsid=2077384612;c=1731414731611), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T12:32:14,103 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd98f27b5eea103f4 with lease ID 0x2ee33e0ddba97678: Processing first storage report for DS-94de2d47-72a8-422b-88e6-a880f10f439f from datanode DatanodeRegistration(127.0.0.1:40985, datanodeUuid=5865a504-e101-426b-97a0-abe47a44bcb7, infoPort=35819, infoSecurePort=0, ipcPort=36731, storageInfo=lv=-57;cid=testClusterID;nsid=2077384612;c=1731414731611) 2024-11-12T12:32:14,103 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd98f27b5eea103f4 with lease ID 0x2ee33e0ddba97678: from storage DS-94de2d47-72a8-422b-88e6-a880f10f439f node DatanodeRegistration(127.0.0.1:40985, datanodeUuid=5865a504-e101-426b-97a0-abe47a44bcb7, infoPort=35819, infoSecurePort=0, ipcPort=36731, storageInfo=lv=-57;cid=testClusterID;nsid=2077384612;c=1731414731611), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T12:32:14,212 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667 2024-11-12T12:32:14,216 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/cluster_6ab58451-9178-2f10-797f-1a1f3d6a99ca/zookeeper_0, clientPort=58543, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/cluster_6ab58451-9178-2f10-797f-1a1f3d6a99ca/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/cluster_6ab58451-9178-2f10-797f-1a1f3d6a99ca/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-12T12:32:14,217 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58543 2024-11-12T12:32:14,217 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T12:32:14,219 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T12:32:14,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35527 is added to blk_1073741825_1001 (size=7) 2024-11-12T12:32:14,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45189 is added to blk_1073741825_1001 (size=7) 2024-11-12T12:32:14,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741825_1001 (size=7) 2024-11-12T12:32:14,234 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c with version=8 2024-11-12T12:32:14,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:32985/user/jenkins/test-data/28264226-6044-cc32-100e-b2cf114f776e/hbase-staging 2024-11-12T12:32:14,236 INFO [Time-limited test {}] client.ConnectionUtils(128): master/4a91c05c96a4:0 server-side Connection retries=45 2024-11-12T12:32:14,236 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T12:32:14,236 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T12:32:14,236 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T12:32:14,236 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T12:32:14,236 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T12:32:14,236 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-12T12:32:14,237 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T12:32:14,237 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:33129 2024-11-12T12:32:14,239 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33129 connecting to ZooKeeper ensemble=127.0.0.1:58543 2024-11-12T12:32:14,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:331290x0, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T12:32:14,287 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33129-0x1012f0831330000 connected 2024-11-12T12:32:14,367 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do 
block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T12:32:14,371 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T12:32:14,376 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T12:32:14,376 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c, hbase.cluster.distributed=false 2024-11-12T12:32:14,378 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T12:32:14,378 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33129 2024-11-12T12:32:14,378 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33129 2024-11-12T12:32:14,379 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33129 2024-11-12T12:32:14,379 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33129 2024-11-12T12:32:14,379 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33129 2024-11-12T12:32:14,394 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/4a91c05c96a4:0 server-side Connection retries=45 2024-11-12T12:32:14,394 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T12:32:14,394 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T12:32:14,395 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T12:32:14,395 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T12:32:14,395 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T12:32:14,395 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T12:32:14,395 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T12:32:14,396 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:39861 2024-11-12T12:32:14,397 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39861 connecting to ZooKeeper ensemble=127.0.0.1:58543 2024-11-12T12:32:14,398 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T12:32:14,399 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T12:32:14,408 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:398610x0, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T12:32:14,408 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:398610x0, quorum=127.0.0.1:58543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T12:32:14,408 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39861-0x1012f0831330001 connected 2024-11-12T12:32:14,409 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-12T12:32:14,409 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-12T12:32:14,410 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39861-0x1012f0831330001, quorum=127.0.0.1:58543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-12T12:32:14,411 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39861-0x1012f0831330001, quorum=127.0.0.1:58543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T12:32:14,412 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39861 2024-11-12T12:32:14,412 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39861 2024-11-12T12:32:14,412 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39861 2024-11-12T12:32:14,413 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39861 2024-11-12T12:32:14,413 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39861 2024-11-12T12:32:14,428 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/4a91c05c96a4:0 server-side Connection retries=45 2024-11-12T12:32:14,429 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T12:32:14,429 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T12:32:14,429 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T12:32:14,429 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T12:32:14,429 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T12:32:14,429 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T12:32:14,429 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T12:32:14,430 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:32903 2024-11-12T12:32:14,431 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:32903 connecting to ZooKeeper ensemble=127.0.0.1:58543 2024-11-12T12:32:14,432 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T12:32:14,433 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T12:32:14,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:329030x0, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T12:32:14,448 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:32903-0x1012f0831330002 connected 2024-11-12T12:32:14,448 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32903-0x1012f0831330002, quorum=127.0.0.1:58543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T12:32:14,448 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-12T12:32:14,449 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-12T12:32:14,450 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32903-0x1012f0831330002, quorum=127.0.0.1:58543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-12T12:32:14,452 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32903-0x1012f0831330002, quorum=127.0.0.1:58543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T12:32:14,452 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32903 2024-11-12T12:32:14,453 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32903 2024-11-12T12:32:14,453 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32903 2024-11-12T12:32:14,454 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32903 2024-11-12T12:32:14,454 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32903 2024-11-12T12:32:14,471 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/4a91c05c96a4:0 server-side Connection retries=45 2024-11-12T12:32:14,472 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T12:32:14,472 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T12:32:14,472 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T12:32:14,472 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T12:32:14,472 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T12:32:14,472 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T12:32:14,472 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T12:32:14,473 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:41369 2024-11-12T12:32:14,474 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41369 connecting to ZooKeeper ensemble=127.0.0.1:58543 2024-11-12T12:32:14,475 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T12:32:14,476 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T12:32:14,480 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-12T12:32:14,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:413690x0, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T12:32:14,490 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:413690x0, quorum=127.0.0.1:58543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T12:32:14,490 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41369-0x1012f0831330003 connected 2024-11-12T12:32:14,491 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-12T12:32:14,491 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-12T12:32:14,492 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): 
regionserver:41369-0x1012f0831330003, quorum=127.0.0.1:58543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-12T12:32:14,493 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41369-0x1012f0831330003, quorum=127.0.0.1:58543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T12:32:14,495 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41369 2024-11-12T12:32:14,496 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41369 2024-11-12T12:32:14,496 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41369 2024-11-12T12:32:14,498 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41369 2024-11-12T12:32:14,498 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41369 2024-11-12T12:32:14,511 DEBUG [M:0;4a91c05c96a4:33129 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;4a91c05c96a4:33129 2024-11-12T12:32:14,511 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/4a91c05c96a4,33129,1731414734236 2024-11-12T12:32:14,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T12:32:14,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1012f0831330002, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T12:32:14,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39861-0x1012f0831330001, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T12:32:14,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41369-0x1012f0831330003, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T12:32:14,525 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/4a91c05c96a4,33129,1731414734236 2024-11-12T12:32:14,532 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T12:32:14,532 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T12:32:14,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1012f0831330002, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T12:32:14,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41369-0x1012f0831330003, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T12:32:14,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39861-0x1012f0831330001, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T12:32:14,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39861-0x1012f0831330001, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:14,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41369-0x1012f0831330003, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:14,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:14,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1012f0831330002, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:14,536 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-12T12:32:14,536 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/4a91c05c96a4,33129,1731414734236 from backup master directory 2024-11-12T12:32:14,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/4a91c05c96a4,33129,1731414734236 2024-11-12T12:32:14,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1012f0831330002, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T12:32:14,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39861-0x1012f0831330001, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T12:32:14,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T12:32:14,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41369-0x1012f0831330003, quorum=127.0.0.1:58543, baseZNode=/hbase 
Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T12:32:14,545 WARN [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-12T12:32:14,545 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=4a91c05c96a4,33129,1731414734236 2024-11-12T12:32:14,552 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/hbase.id] with ID: 3ca4531e-4e56-498f-90f1-bdf21b9fed88 2024-11-12T12:32:14,553 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/.tmp/hbase.id 2024-11-12T12:32:14,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45189 is added to blk_1073741826_1002 (size=42) 2024-11-12T12:32:14,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35527 is added to blk_1073741826_1002 (size=42) 2024-11-12T12:32:14,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741826_1002 (size=42) 2024-11-12T12:32:14,565 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/.tmp/hbase.id]:[hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/hbase.id] 2024-11-12T12:32:14,581 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T12:32:14,581 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-12T12:32:14,582 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
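Editor's note (not part of the captured log): the ZKWatcher/ZKUtil entries in this stretch record the standard ZooKeeper watch pattern — each server sets a watch on a znode such as /hbase/master or /hbase/backup-masters (even before it exists) and later receives NodeCreated/NodeChildrenChanged/NodeDeleted events for it. The sketch below illustrates that pattern with the plain Apache ZooKeeper client API rather than HBase's internal ZKWatcher; the ensemble address and znode paths are copied from the log, everything else is illustrative.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;

    public class MasterZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        // Ensemble taken from the log: quorum=127.0.0.1:58543, baseZNode=/hbase.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:58543", 30_000, (WatchedEvent event) ->
            // Corresponds to the "Received ZooKeeper Event, type=..., state=..." entries.
            System.out.println("event type=" + event.getType() + " path=" + event.getPath()));

        // exists(path, true) registers a watch even when the znode is absent, which is
        // what "Set watcher on znode that does not yet exist, /hbase/master" records;
        // a NodeCreated event fires once the active master creates that znode.
        if (zk.exists("/hbase/master", true) == null) {
          System.out.println("/hbase/master not created yet; watch registered");
        }
        zk.close();
      }
    }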
2024-11-12T12:32:14,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:14,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1012f0831330002, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:14,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41369-0x1012f0831330003, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:14,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39861-0x1012f0831330001, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:14,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45189 is added to blk_1073741827_1003 (size=196) 2024-11-12T12:32:14,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741827_1003 (size=196) 2024-11-12T12:32:14,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35527 is added to blk_1073741827_1003 (size=196) 2024-11-12T12:32:14,606 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T12:32:14,607 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-12T12:32:14,607 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T12:32:14,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is 
added to blk_1073741828_1004 (size=1189) 2024-11-12T12:32:14,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45189 is added to blk_1073741828_1004 (size=1189) 2024-11-12T12:32:14,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35527 is added to blk_1073741828_1004 (size=1189) 2024-11-12T12:32:14,619 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/MasterData/data/master/store 2024-11-12T12:32:14,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741829_1005 (size=34) 2024-11-12T12:32:14,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45189 is added to blk_1073741829_1005 (size=34) 2024-11-12T12:32:14,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35527 is added to blk_1073741829_1005 (size=34) 2024-11-12T12:32:14,629 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T12:32:14,629 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T12:32:14,629 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T12:32:14,629 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
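[Editor's note] The descriptor printed above for 'master:store' (families info, proc, rs, state with their VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING and BLOCKSIZE settings) can be expressed with the public HBase client builders. The sketch below covers two of the families under a hypothetical table name "demo"; the per-family values mirror the log, the table name does not.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  public static TableDescriptor build() {
    // Mirrors: NAME => 'info', VERSIONS => '3', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1',
    //          BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', BLOCKSIZE => 8192
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8192)
        .build();
    // Mirrors: NAME => 'proc', VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => 65536
    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .setBlocksize(65536)
        .build();
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .build();
  }
}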
2024-11-12T12:32:14,630 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-12T12:32:14,630 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T12:32:14,630 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T12:32:14,630 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731414734629Disabling compacts and flushes for region at 1731414734629Disabling writes for close at 1731414734630 (+1 ms)Writing region close event to WAL at 1731414734630Closed at 1731414734630 2024-11-12T12:32:14,631 WARN [master/4a91c05c96a4:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/MasterData/data/master/store/.initializing 2024-11-12T12:32:14,631 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/MasterData/WALs/4a91c05c96a4,33129,1731414734236 2024-11-12T12:32:14,635 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4a91c05c96a4%2C33129%2C1731414734236, suffix=, logDir=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/MasterData/WALs/4a91c05c96a4,33129,1731414734236, archiveDir=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/MasterData/oldWALs, maxLogs=10 2024-11-12T12:32:14,635 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4a91c05c96a4%2C33129%2C1731414734236.1731414734635 2024-11-12T12:32:14,644 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/MasterData/WALs/4a91c05c96a4,33129,1731414734236/4a91c05c96a4%2C33129%2C1731414734236.1731414734635 2024-11-12T12:32:14,646 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38405:38405),(127.0.0.1/127.0.0.1:40051:40051),(127.0.0.1/127.0.0.1:35819:35819)] 2024-11-12T12:32:14,647 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-12T12:32:14,647 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T12:32:14,647 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T12:32:14,647 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T12:32:14,649 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T12:32:14,650 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-12T12:32:14,651 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T12:32:14,651 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T12:32:14,651 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T12:32:14,653 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-12T12:32:14,654 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T12:32:14,654 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T12:32:14,655 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T12:32:14,658 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-12T12:32:14,658 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T12:32:14,659 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T12:32:14,659 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T12:32:14,662 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-12T12:32:14,662 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T12:32:14,663 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T12:32:14,663 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T12:32:14,664 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T12:32:14,664 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T12:32:14,666 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T12:32:14,666 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T12:32:14,666 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T12:32:14,668 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T12:32:14,673 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T12:32:14,674 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74748842, jitterRate=0.11384454369544983}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T12:32:14,675 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731414734647Initializing all the Stores at 1731414734648 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731414734648Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731414734648Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731414734649 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731414734649Cleaning up temporary data from old regions at 1731414734666 (+17 ms)Region opened successfully at 1731414734675 (+9 ms) 2024-11-12T12:32:14,675 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-12T12:32:14,680 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7eb62852, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4a91c05c96a4/172.17.0.3:0 2024-11-12T12:32:14,681 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-12T12:32:14,681 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-12T12:32:14,681 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-12T12:32:14,681 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-12T12:32:14,682 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-12T12:32:14,682 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-12T12:32:14,682 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-12T12:32:14,685 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
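[Editor's note] Both the cluster ID file (hbase.id, written under .tmp/ and then moved into place) and the recovered.edits/1.seqid marker written above follow a write-to-temporary-then-rename pattern on HDFS. A minimal sketch of that pattern with the Hadoop FileSystem API; the NameNode URI, paths, and class name are placeholders, not the test's real values.

import java.net.URI;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AtomicMarkerWriteSketch {
  public static void writeMarker(String content) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:9000"), conf);
    Path tmp = new Path("/demo/.tmp/marker");
    Path dst = new Path("/demo/marker");
    // Write the full content to a temporary path first...
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(content.getBytes(StandardCharsets.UTF_8)); // e.g. a cluster id or seq id
    }
    // ...then move the finished file to its target location in one step.
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
    }
  }
}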
2024-11-12T12:32:14,686 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-12T12:32:14,692 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-12T12:32:14,692 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-12T12:32:14,693 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-12T12:32:14,702 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-12T12:32:14,703 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-12T12:32:14,704 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-12T12:32:14,713 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-12T12:32:14,714 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-12T12:32:14,723 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-12T12:32:14,726 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-12T12:32:14,734 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-12T12:32:14,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1012f0831330002, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T12:32:14,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41369-0x1012f0831330003, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T12:32:14,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T12:32:14,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39861-0x1012f0831330001, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-11-12T12:32:14,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41369-0x1012f0831330003, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:14,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:14,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39861-0x1012f0831330001, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:14,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1012f0831330002, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:14,745 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=4a91c05c96a4,33129,1731414734236, sessionid=0x1012f0831330000, setting cluster-up flag (Was=false) 2024-11-12T12:32:14,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1012f0831330002, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:14,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39861-0x1012f0831330001, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:14,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:14,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41369-0x1012f0831330003, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:14,798 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-12T12:32:14,801 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=4a91c05c96a4,33129,1731414734236 2024-11-12T12:32:14,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1012f0831330002, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:14,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41369-0x1012f0831330003, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:14,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39861-0x1012f0831330001, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:14,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:14,860 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-12T12:32:14,862 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=4a91c05c96a4,33129,1731414734236 2024-11-12T12:32:14,863 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-12T12:32:14,866 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-12T12:32:14,866 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-12T12:32:14,866 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
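[Editor's note] Several entries in this stretch probe znodes that may legitimately be absent (ZKUtil(444): "Unable to get data of znode ... because node does not exist (not necessarily an error)"). A sketch of that tolerant-read pattern with the plain ZooKeeper client, connection setup omitted; this is an illustration, not HBase's ZKUtil implementation.

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class OptionalZNodeReadSketch {
  /** Returns the znode data, or null when the node simply is not there. */
  static byte[] readIfPresent(ZooKeeper zk, String path) throws Exception {
    Stat stat = zk.exists(path, false);   // no watch, just probe for existence
    if (stat == null) {
      return null;                        // absent: treated as "not necessarily an error"
    }
    try {
      return zk.getData(path, false, stat);
    } catch (KeeperException.NoNodeException e) {
      return null;                        // node was deleted between the two calls
    }
  }
}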
2024-11-12T12:32:14,867 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 4a91c05c96a4,33129,1731414734236 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-12T12:32:14,869 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/4a91c05c96a4:0, corePoolSize=5, maxPoolSize=5 2024-11-12T12:32:14,869 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/4a91c05c96a4:0, corePoolSize=5, maxPoolSize=5 2024-11-12T12:32:14,869 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/4a91c05c96a4:0, corePoolSize=5, maxPoolSize=5 2024-11-12T12:32:14,869 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/4a91c05c96a4:0, corePoolSize=5, maxPoolSize=5 2024-11-12T12:32:14,869 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/4a91c05c96a4:0, corePoolSize=10, maxPoolSize=10 2024-11-12T12:32:14,869 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:14,869 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/4a91c05c96a4:0, corePoolSize=2, maxPoolSize=2 2024-11-12T12:32:14,869 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:14,870 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731414764870 2024-11-12T12:32:14,870 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-12T12:32:14,870 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-12T12:32:14,870 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-12T12:32:14,870 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-12T12:32:14,870 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-12T12:32:14,870 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-12T12:32:14,871 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:14,872 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T12:32:14,872 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-12T12:32:14,872 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-12T12:32:14,872 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-12T12:32:14,872 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-12T12:32:14,872 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-12T12:32:14,872 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-12T12:32:14,873 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/4a91c05c96a4:0:becomeActiveMaster-HFileCleaner.large.0-1731414734872,5,FailOnTimeoutGroup] 2024-11-12T12:32:14,873 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/4a91c05c96a4:0:becomeActiveMaster-HFileCleaner.small.0-1731414734873,5,FailOnTimeoutGroup] 2024-11-12T12:32:14,873 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:14,873 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-12T12:32:14,873 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:14,873 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
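[Editor's note] The HMaster(1741) entry above notes that reopening regions with a very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is set above 0. The sketch below supplies that key, plus two other keys that appear in this log, programmatically; in practice they would normally be set in hbase-site.xml, and the chosen values are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class LoggedConfigKeysSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // > 0 enables the "reopen regions with very high storeFileRefCount" chore mentioned above.
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
    // Value the SimpleRegionNormalizer reports earlier in this log.
    conf.setInt("hbase.normalizer.merge.min_region_size.mb", 1);
    // Per-column-family flush lower bound; FlushLargeStoresPolicy falls back to a derived value when unset.
    conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);
    return conf;
  }
}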
2024-11-12T12:32:14,873 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T12:32:14,874 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-12T12:32:14,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45189 is added to blk_1073741831_1007 (size=1321) 2024-11-12T12:32:14,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741831_1007 (size=1321) 2024-11-12T12:32:14,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35527 is added to blk_1073741831_1007 (size=1321) 2024-11-12T12:32:14,888 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-12T12:32:14,888 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', 
IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c 2024-11-12T12:32:14,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741832_1008 (size=32) 2024-11-12T12:32:14,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45189 is added to blk_1073741832_1008 (size=32) 2024-11-12T12:32:14,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35527 is added to blk_1073741832_1008 (size=32) 2024-11-12T12:32:14,897 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T12:32:14,898 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T12:32:14,900 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T12:32:14,900 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T12:32:14,901 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T12:32:14,901 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-12T12:32:14,901 INFO [RS:0;4a91c05c96a4:39861 {}] 
regionserver.HRegionServer(746): ClusterId : 3ca4531e-4e56-498f-90f1-bdf21b9fed88 2024-11-12T12:32:14,901 INFO [RS:1;4a91c05c96a4:32903 {}] regionserver.HRegionServer(746): ClusterId : 3ca4531e-4e56-498f-90f1-bdf21b9fed88 2024-11-12T12:32:14,901 INFO [RS:2;4a91c05c96a4:41369 {}] regionserver.HRegionServer(746): ClusterId : 3ca4531e-4e56-498f-90f1-bdf21b9fed88 2024-11-12T12:32:14,901 DEBUG [RS:0;4a91c05c96a4:39861 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T12:32:14,901 DEBUG [RS:1;4a91c05c96a4:32903 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T12:32:14,901 DEBUG [RS:2;4a91c05c96a4:41369 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T12:32:14,902 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-12T12:32:14,902 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T12:32:14,903 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T12:32:14,903 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T12:32:14,904 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T12:32:14,904 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T12:32:14,905 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T12:32:14,905 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T12:32:14,906 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-12T12:32:14,906 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T12:32:14,907 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T12:32:14,907 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T12:32:14,908 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/hbase/meta/1588230740 2024-11-12T12:32:14,908 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/hbase/meta/1588230740 2024-11-12T12:32:14,910 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T12:32:14,910 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T12:32:14,910 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-12T12:32:14,912 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T12:32:14,914 DEBUG [RS:2;4a91c05c96a4:41369 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T12:32:14,914 DEBUG [RS:2;4a91c05c96a4:41369 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T12:32:14,915 DEBUG [RS:0;4a91c05c96a4:39861 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T12:32:14,915 DEBUG [RS:0;4a91c05c96a4:39861 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T12:32:14,915 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T12:32:14,916 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64952110, jitterRate=-0.03213813900947571}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T12:32:14,916 DEBUG [RS:1;4a91c05c96a4:32903 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T12:32:14,916 DEBUG [RS:1;4a91c05c96a4:32903 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T12:32:14,917 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731414734897Initializing all the Stores at 1731414734898 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731414734898Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731414734898Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731414734898Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731414734898Cleaning up temporary data from old regions at 1731414734910 (+12 ms)Region opened successfully at 1731414734917 (+7 ms) 2024-11-12T12:32:14,917 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 
2024-11-12T12:32:14,917 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-12T12:32:14,917 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-12T12:32:14,917 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-12T12:32:14,917 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-12T12:32:14,917 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-11-12T12:32:14,917 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731414734917Disabling compacts and flushes for region at 1731414734917Disabling writes for close at 1731414734917Writing region close event to WAL at 1731414734917Closed at 1731414734917
2024-11-12T12:32:14,919 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-12T12:32:14,919 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta
2024-11-12T12:32:14,920 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-11-12T12:32:14,922 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-12T12:32:14,923 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-11-12T12:32:14,935 DEBUG [RS:2;4a91c05c96a4:41369 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-12T12:32:14,935 DEBUG [RS:0;4a91c05c96a4:39861 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-12T12:32:14,935 DEBUG [RS:1;4a91c05c96a4:32903 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-12T12:32:14,935 DEBUG [RS:1;4a91c05c96a4:32903 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79ddcaed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4a91c05c96a4/172.17.0.3:0
2024-11-12T12:32:14,935 DEBUG [RS:2;4a91c05c96a4:41369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60e54b59, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4a91c05c96a4/172.17.0.3:0
2024-11-12T12:32:14,935 DEBUG [RS:0;4a91c05c96a4:39861 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68f00693, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4a91c05c96a4/172.17.0.3:0
2024-11-12T12:32:14,949 DEBUG [RS:2;4a91c05c96a4:41369 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;4a91c05c96a4:41369
2024-11-12T12:32:14,949 INFO [RS:2;4a91c05c96a4:41369 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-11-12T12:32:14,949 INFO [RS:2;4a91c05c96a4:41369 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-11-12T12:32:14,949 DEBUG [RS:2;4a91c05c96a4:41369 {}] regionserver.HRegionServer(832): About to register with Master.
2024-11-12T12:32:14,950 INFO [RS:2;4a91c05c96a4:41369 {}] regionserver.HRegionServer(2659): reportForDuty to master=4a91c05c96a4,33129,1731414734236 with port=41369, startcode=1731414734471
2024-11-12T12:32:14,950 DEBUG [RS:2;4a91c05c96a4:41369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-12T12:32:14,951 DEBUG [RS:0;4a91c05c96a4:39861 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;4a91c05c96a4:39861
2024-11-12T12:32:14,951 DEBUG [RS:1;4a91c05c96a4:32903 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;4a91c05c96a4:32903
2024-11-12T12:32:14,951 INFO [RS:0;4a91c05c96a4:39861 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-11-12T12:32:14,951 INFO [RS:0;4a91c05c96a4:39861 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-11-12T12:32:14,951 INFO [RS:1;4a91c05c96a4:32903 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-11-12T12:32:14,951 DEBUG [RS:0;4a91c05c96a4:39861 {}] regionserver.HRegionServer(832): About to register with Master.
2024-11-12T12:32:14,951 INFO [RS:1;4a91c05c96a4:32903 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-11-12T12:32:14,952 DEBUG [RS:1;4a91c05c96a4:32903 {}] regionserver.HRegionServer(832): About to register with Master.
2024-11-12T12:32:14,952 INFO [RS:1;4a91c05c96a4:32903 {}] regionserver.HRegionServer(2659): reportForDuty to master=4a91c05c96a4,33129,1731414734236 with port=32903, startcode=1731414734428
2024-11-12T12:32:14,952 INFO [RS:0;4a91c05c96a4:39861 {}] regionserver.HRegionServer(2659): reportForDuty to master=4a91c05c96a4,33129,1731414734236 with port=39861, startcode=1731414734394
2024-11-12T12:32:14,953 DEBUG [RS:0;4a91c05c96a4:39861 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-12T12:32:14,953 DEBUG [RS:1;4a91c05c96a4:32903 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-12T12:32:14,953 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34859, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService
2024-11-12T12:32:14,954 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33129 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 4a91c05c96a4,41369,1731414734471
2024-11-12T12:32:14,954 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33129 {}] master.ServerManager(517): Registering regionserver=4a91c05c96a4,41369,1731414734471
2024-11-12T12:32:14,955 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54251, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService
2024-11-12T12:32:14,955 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51313, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService
2024-11-12T12:32:14,956 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33129 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 4a91c05c96a4,32903,1731414734428
2024-11-12T12:32:14,956 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33129 {}] master.ServerManager(517): Registering regionserver=4a91c05c96a4,32903,1731414734428
2024-11-12T12:32:14,956 DEBUG [RS:2;4a91c05c96a4:41369 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c
2024-11-12T12:32:14,956 DEBUG [RS:2;4a91c05c96a4:41369 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37041
2024-11-12T12:32:14,956 DEBUG [RS:2;4a91c05c96a4:41369 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-12T12:32:14,958 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33129 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 4a91c05c96a4,39861,1731414734394
2024-11-12T12:32:14,958 DEBUG [RS:1;4a91c05c96a4:32903 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c
2024-11-12T12:32:14,958 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33129 {}] master.ServerManager(517): Registering regionserver=4a91c05c96a4,39861,1731414734394
2024-11-12T12:32:14,958 DEBUG [RS:1;4a91c05c96a4:32903 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37041
2024-11-12T12:32:14,958 DEBUG [RS:1;4a91c05c96a4:32903 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-12T12:32:14,961 DEBUG [RS:0;4a91c05c96a4:39861 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c
2024-11-12T12:32:14,961 DEBUG [RS:0;4a91c05c96a4:39861 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37041
2024-11-12T12:32:14,961 DEBUG [RS:0;4a91c05c96a4:39861 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-12T12:32:14,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-12T12:32:15,005 DEBUG [RS:2;4a91c05c96a4:41369 {}] zookeeper.ZKUtil(111): regionserver:41369-0x1012f0831330003, quorum=127.0.0.1:58543, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/4a91c05c96a4,41369,1731414734471
2024-11-12T12:32:15,005 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [4a91c05c96a4,39861,1731414734394]
2024-11-12T12:32:15,005 WARN [RS:2;4a91c05c96a4:41369 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-12T12:32:15,005 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [4a91c05c96a4,32903,1731414734428]
2024-11-12T12:32:15,005 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [4a91c05c96a4,41369,1731414734471]
2024-11-12T12:32:15,005 INFO [RS:2;4a91c05c96a4:41369 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-12T12:32:15,005 DEBUG [RS:0;4a91c05c96a4:39861 {}] zookeeper.ZKUtil(111): regionserver:39861-0x1012f0831330001, quorum=127.0.0.1:58543, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/4a91c05c96a4,39861,1731414734394
2024-11-12T12:32:15,005 WARN [RS:0;4a91c05c96a4:39861 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-12T12:32:15,005 DEBUG [RS:1;4a91c05c96a4:32903 {}] zookeeper.ZKUtil(111): regionserver:32903-0x1012f0831330002, quorum=127.0.0.1:58543, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/4a91c05c96a4,32903,1731414734428
2024-11-12T12:32:15,005 INFO [RS:0;4a91c05c96a4:39861 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-12T12:32:15,005 WARN [RS:1;4a91c05c96a4:32903 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-12T12:32:15,005 DEBUG [RS:2;4a91c05c96a4:41369 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/WALs/4a91c05c96a4,41369,1731414734471 2024-11-12T12:32:15,005 DEBUG [RS:0;4a91c05c96a4:39861 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/WALs/4a91c05c96a4,39861,1731414734394 2024-11-12T12:32:15,005 INFO [RS:1;4a91c05c96a4:32903 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T12:32:15,005 DEBUG [RS:1;4a91c05c96a4:32903 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/WALs/4a91c05c96a4,32903,1731414734428 2024-11-12T12:32:15,010 INFO [RS:2;4a91c05c96a4:41369 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T12:32:15,010 INFO [RS:0;4a91c05c96a4:39861 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T12:32:15,012 INFO [RS:2;4a91c05c96a4:41369 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T12:32:15,012 INFO [RS:1;4a91c05c96a4:32903 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T12:32:15,012 INFO [RS:2;4a91c05c96a4:41369 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T12:32:15,012 INFO [RS:2;4a91c05c96a4:41369 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:15,013 INFO [RS:2;4a91c05c96a4:41369 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T12:32:15,014 INFO [RS:2;4a91c05c96a4:41369 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T12:32:15,014 INFO [RS:2;4a91c05c96a4:41369 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-12T12:32:15,014 DEBUG [RS:2;4a91c05c96a4:41369 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,014 DEBUG [RS:2;4a91c05c96a4:41369 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,014 DEBUG [RS:2;4a91c05c96a4:41369 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,014 DEBUG [RS:2;4a91c05c96a4:41369 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,014 DEBUG [RS:2;4a91c05c96a4:41369 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,015 INFO [RS:0;4a91c05c96a4:39861 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T12:32:15,015 DEBUG [RS:2;4a91c05c96a4:41369 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/4a91c05c96a4:0, corePoolSize=2, maxPoolSize=2 2024-11-12T12:32:15,015 DEBUG [RS:2;4a91c05c96a4:41369 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,015 DEBUG [RS:2;4a91c05c96a4:41369 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,015 DEBUG [RS:2;4a91c05c96a4:41369 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,015 DEBUG [RS:2;4a91c05c96a4:41369 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,015 INFO [RS:0;4a91c05c96a4:39861 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T12:32:15,015 DEBUG [RS:2;4a91c05c96a4:41369 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,015 INFO [RS:0;4a91c05c96a4:39861 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-12T12:32:15,015 DEBUG [RS:2;4a91c05c96a4:41369 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,015 DEBUG [RS:2;4a91c05c96a4:41369 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/4a91c05c96a4:0, corePoolSize=3, maxPoolSize=3 2024-11-12T12:32:15,015 DEBUG [RS:2;4a91c05c96a4:41369 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0, corePoolSize=3, maxPoolSize=3 2024-11-12T12:32:15,015 INFO [RS:0;4a91c05c96a4:39861 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T12:32:15,016 INFO [RS:2;4a91c05c96a4:41369 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:15,016 INFO [RS:2;4a91c05c96a4:41369 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:15,016 INFO [RS:2;4a91c05c96a4:41369 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:15,016 INFO [RS:2;4a91c05c96a4:41369 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:15,017 INFO [RS:2;4a91c05c96a4:41369 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:15,017 INFO [RS:2;4a91c05c96a4:41369 {}] hbase.ChoreService(168): Chore ScheduledChore name=4a91c05c96a4,41369,1731414734471-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T12:32:15,018 INFO [RS:0;4a91c05c96a4:39861 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T12:32:15,018 INFO [RS:1;4a91c05c96a4:32903 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T12:32:15,018 INFO [RS:0;4a91c05c96a4:39861 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:15,018 DEBUG [RS:0;4a91c05c96a4:39861 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,018 DEBUG [RS:0;4a91c05c96a4:39861 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,019 DEBUG [RS:0;4a91c05c96a4:39861 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,019 INFO [RS:1;4a91c05c96a4:32903 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T12:32:15,019 INFO [RS:1;4a91c05c96a4:32903 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-12T12:32:15,019 DEBUG [RS:0;4a91c05c96a4:39861 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,019 DEBUG [RS:0;4a91c05c96a4:39861 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,019 DEBUG [RS:0;4a91c05c96a4:39861 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/4a91c05c96a4:0, corePoolSize=2, maxPoolSize=2 2024-11-12T12:32:15,019 INFO [RS:1;4a91c05c96a4:32903 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T12:32:15,019 DEBUG [RS:0;4a91c05c96a4:39861 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,019 DEBUG [RS:0;4a91c05c96a4:39861 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,019 DEBUG [RS:0;4a91c05c96a4:39861 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,019 DEBUG [RS:0;4a91c05c96a4:39861 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,019 DEBUG [RS:0;4a91c05c96a4:39861 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,019 DEBUG [RS:0;4a91c05c96a4:39861 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,020 DEBUG [RS:0;4a91c05c96a4:39861 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/4a91c05c96a4:0, corePoolSize=3, maxPoolSize=3 2024-11-12T12:32:15,020 DEBUG [RS:0;4a91c05c96a4:39861 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0, corePoolSize=3, maxPoolSize=3 2024-11-12T12:32:15,021 INFO [RS:1;4a91c05c96a4:32903 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T12:32:15,021 INFO [RS:1;4a91c05c96a4:32903 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-12T12:32:15,021 DEBUG [RS:1;4a91c05c96a4:32903 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,021 DEBUG [RS:1;4a91c05c96a4:32903 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,022 DEBUG [RS:1;4a91c05c96a4:32903 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,022 DEBUG [RS:1;4a91c05c96a4:32903 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,022 DEBUG [RS:1;4a91c05c96a4:32903 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,022 DEBUG [RS:1;4a91c05c96a4:32903 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/4a91c05c96a4:0, corePoolSize=2, maxPoolSize=2 2024-11-12T12:32:15,022 DEBUG [RS:1;4a91c05c96a4:32903 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,022 DEBUG [RS:1;4a91c05c96a4:32903 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,022 DEBUG [RS:1;4a91c05c96a4:32903 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,022 DEBUG [RS:1;4a91c05c96a4:32903 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,022 DEBUG [RS:1;4a91c05c96a4:32903 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,022 DEBUG [RS:1;4a91c05c96a4:32903 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/4a91c05c96a4:0, corePoolSize=1, maxPoolSize=1 2024-11-12T12:32:15,022 DEBUG [RS:1;4a91c05c96a4:32903 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/4a91c05c96a4:0, corePoolSize=3, maxPoolSize=3 2024-11-12T12:32:15,022 DEBUG [RS:1;4a91c05c96a4:32903 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0, corePoolSize=3, maxPoolSize=3 2024-11-12T12:32:15,026 INFO [RS:0;4a91c05c96a4:39861 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:15,026 INFO [RS:0;4a91c05c96a4:39861 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:15,026 INFO [RS:0;4a91c05c96a4:39861 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:15,026 INFO [RS:0;4a91c05c96a4:39861 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-12T12:32:15,026 INFO [RS:0;4a91c05c96a4:39861 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:15,026 INFO [RS:0;4a91c05c96a4:39861 {}] hbase.ChoreService(168): Chore ScheduledChore name=4a91c05c96a4,39861,1731414734394-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T12:32:15,028 INFO [RS:1;4a91c05c96a4:32903 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:15,028 INFO [RS:1;4a91c05c96a4:32903 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:15,028 INFO [RS:1;4a91c05c96a4:32903 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:15,028 INFO [RS:1;4a91c05c96a4:32903 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:15,028 INFO [RS:1;4a91c05c96a4:32903 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:15,028 INFO [RS:1;4a91c05c96a4:32903 {}] hbase.ChoreService(168): Chore ScheduledChore name=4a91c05c96a4,32903,1731414734428-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T12:32:15,033 INFO [RS:2;4a91c05c96a4:41369 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T12:32:15,033 INFO [RS:2;4a91c05c96a4:41369 {}] hbase.ChoreService(168): Chore ScheduledChore name=4a91c05c96a4,41369,1731414734471-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:15,033 INFO [RS:2;4a91c05c96a4:41369 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:15,034 INFO [RS:2;4a91c05c96a4:41369 {}] regionserver.Replication(171): 4a91c05c96a4,41369,1731414734471 started 2024-11-12T12:32:15,042 INFO [RS:0;4a91c05c96a4:39861 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T12:32:15,042 INFO [RS:0;4a91c05c96a4:39861 {}] hbase.ChoreService(168): Chore ScheduledChore name=4a91c05c96a4,39861,1731414734394-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:15,042 INFO [RS:0;4a91c05c96a4:39861 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:15,042 INFO [RS:0;4a91c05c96a4:39861 {}] regionserver.Replication(171): 4a91c05c96a4,39861,1731414734394 started 2024-11-12T12:32:15,046 INFO [RS:1;4a91c05c96a4:32903 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T12:32:15,046 INFO [RS:1;4a91c05c96a4:32903 {}] hbase.ChoreService(168): Chore ScheduledChore name=4a91c05c96a4,32903,1731414734428-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:15,046 INFO [RS:1;4a91c05c96a4:32903 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:15,046 INFO [RS:1;4a91c05c96a4:32903 {}] regionserver.Replication(171): 4a91c05c96a4,32903,1731414734428 started 2024-11-12T12:32:15,047 INFO [RS:2;4a91c05c96a4:41369 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-12T12:32:15,048 INFO [RS:2;4a91c05c96a4:41369 {}] regionserver.HRegionServer(1482): Serving as 4a91c05c96a4,41369,1731414734471, RpcServer on 4a91c05c96a4/172.17.0.3:41369, sessionid=0x1012f0831330003 2024-11-12T12:32:15,048 DEBUG [RS:2;4a91c05c96a4:41369 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T12:32:15,048 DEBUG [RS:2;4a91c05c96a4:41369 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 4a91c05c96a4,41369,1731414734471 2024-11-12T12:32:15,048 DEBUG [RS:2;4a91c05c96a4:41369 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4a91c05c96a4,41369,1731414734471' 2024-11-12T12:32:15,048 DEBUG [RS:2;4a91c05c96a4:41369 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T12:32:15,048 DEBUG [RS:2;4a91c05c96a4:41369 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T12:32:15,049 DEBUG [RS:2;4a91c05c96a4:41369 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T12:32:15,049 DEBUG [RS:2;4a91c05c96a4:41369 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T12:32:15,049 DEBUG [RS:2;4a91c05c96a4:41369 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 4a91c05c96a4,41369,1731414734471 2024-11-12T12:32:15,049 DEBUG [RS:2;4a91c05c96a4:41369 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4a91c05c96a4,41369,1731414734471' 2024-11-12T12:32:15,049 DEBUG [RS:2;4a91c05c96a4:41369 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T12:32:15,049 DEBUG [RS:2;4a91c05c96a4:41369 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T12:32:15,050 DEBUG [RS:2;4a91c05c96a4:41369 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T12:32:15,050 INFO [RS:2;4a91c05c96a4:41369 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T12:32:15,050 INFO [RS:2;4a91c05c96a4:41369 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-12T12:32:15,056 INFO [RS:0;4a91c05c96a4:39861 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-12T12:32:15,056 INFO [RS:0;4a91c05c96a4:39861 {}] regionserver.HRegionServer(1482): Serving as 4a91c05c96a4,39861,1731414734394, RpcServer on 4a91c05c96a4/172.17.0.3:39861, sessionid=0x1012f0831330001 2024-11-12T12:32:15,056 DEBUG [RS:0;4a91c05c96a4:39861 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T12:32:15,056 DEBUG [RS:0;4a91c05c96a4:39861 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 4a91c05c96a4,39861,1731414734394 2024-11-12T12:32:15,056 DEBUG [RS:0;4a91c05c96a4:39861 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4a91c05c96a4,39861,1731414734394' 2024-11-12T12:32:15,056 DEBUG [RS:0;4a91c05c96a4:39861 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T12:32:15,057 DEBUG [RS:0;4a91c05c96a4:39861 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T12:32:15,058 DEBUG [RS:0;4a91c05c96a4:39861 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T12:32:15,058 DEBUG [RS:0;4a91c05c96a4:39861 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T12:32:15,058 DEBUG [RS:0;4a91c05c96a4:39861 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 4a91c05c96a4,39861,1731414734394 2024-11-12T12:32:15,058 DEBUG [RS:0;4a91c05c96a4:39861 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4a91c05c96a4,39861,1731414734394' 2024-11-12T12:32:15,058 DEBUG [RS:0;4a91c05c96a4:39861 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T12:32:15,058 DEBUG [RS:0;4a91c05c96a4:39861 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T12:32:15,059 DEBUG [RS:0;4a91c05c96a4:39861 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T12:32:15,059 INFO [RS:0;4a91c05c96a4:39861 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T12:32:15,059 INFO [RS:0;4a91c05c96a4:39861 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-12T12:32:15,060 INFO [RS:1;4a91c05c96a4:32903 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-12T12:32:15,060 INFO [RS:1;4a91c05c96a4:32903 {}] regionserver.HRegionServer(1482): Serving as 4a91c05c96a4,32903,1731414734428, RpcServer on 4a91c05c96a4/172.17.0.3:32903, sessionid=0x1012f0831330002 2024-11-12T12:32:15,060 DEBUG [RS:1;4a91c05c96a4:32903 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T12:32:15,060 DEBUG [RS:1;4a91c05c96a4:32903 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 4a91c05c96a4,32903,1731414734428 2024-11-12T12:32:15,060 DEBUG [RS:1;4a91c05c96a4:32903 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4a91c05c96a4,32903,1731414734428' 2024-11-12T12:32:15,060 DEBUG [RS:1;4a91c05c96a4:32903 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T12:32:15,061 DEBUG [RS:1;4a91c05c96a4:32903 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T12:32:15,062 DEBUG [RS:1;4a91c05c96a4:32903 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T12:32:15,062 DEBUG [RS:1;4a91c05c96a4:32903 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T12:32:15,062 DEBUG [RS:1;4a91c05c96a4:32903 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 4a91c05c96a4,32903,1731414734428 2024-11-12T12:32:15,062 DEBUG [RS:1;4a91c05c96a4:32903 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4a91c05c96a4,32903,1731414734428' 2024-11-12T12:32:15,062 DEBUG [RS:1;4a91c05c96a4:32903 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T12:32:15,062 DEBUG [RS:1;4a91c05c96a4:32903 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T12:32:15,063 DEBUG [RS:1;4a91c05c96a4:32903 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T12:32:15,063 INFO [RS:1;4a91c05c96a4:32903 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T12:32:15,063 INFO [RS:1;4a91c05c96a4:32903 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-12T12:32:15,073 WARN [4a91c05c96a4:33129 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-12T12:32:15,153 INFO [RS:2;4a91c05c96a4:41369 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4a91c05c96a4%2C41369%2C1731414734471, suffix=, logDir=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/WALs/4a91c05c96a4,41369,1731414734471, archiveDir=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/oldWALs, maxLogs=32
2024-11-12T12:32:15,156 INFO [RS:2;4a91c05c96a4:41369 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4a91c05c96a4%2C41369%2C1731414734471.1731414735156
2024-11-12T12:32:15,161 INFO [RS:0;4a91c05c96a4:39861 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4a91c05c96a4%2C39861%2C1731414734394, suffix=, logDir=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/WALs/4a91c05c96a4,39861,1731414734394, archiveDir=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/oldWALs, maxLogs=32
2024-11-12T12:32:15,163 INFO [RS:0;4a91c05c96a4:39861 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4a91c05c96a4%2C39861%2C1731414734394.1731414735162
2024-11-12T12:32:15,165 INFO [RS:1;4a91c05c96a4:32903 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4a91c05c96a4%2C32903%2C1731414734428, suffix=, logDir=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/WALs/4a91c05c96a4,32903,1731414734428, archiveDir=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/oldWALs, maxLogs=32
2024-11-12T12:32:15,166 INFO [RS:1;4a91c05c96a4:32903 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4a91c05c96a4%2C32903%2C1731414734428.1731414735166
2024-11-12T12:32:15,167 INFO [RS:2;4a91c05c96a4:41369 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/WALs/4a91c05c96a4,41369,1731414734471/4a91c05c96a4%2C41369%2C1731414734471.1731414735156
2024-11-12T12:32:15,168 DEBUG [RS:2;4a91c05c96a4:41369 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40051:40051),(127.0.0.1/127.0.0.1:35819:35819),(127.0.0.1/127.0.0.1:38405:38405)]
2024-11-12T12:32:15,171 INFO [RS:0;4a91c05c96a4:39861 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/WALs/4a91c05c96a4,39861,1731414734394/4a91c05c96a4%2C39861%2C1731414734394.1731414735162
2024-11-12T12:32:15,173 DEBUG [RS:0;4a91c05c96a4:39861 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35819:35819),(127.0.0.1/127.0.0.1:40051:40051),(127.0.0.1/127.0.0.1:38405:38405)]
2024-11-12T12:32:15,174 INFO [RS:1;4a91c05c96a4:32903 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/WALs/4a91c05c96a4,32903,1731414734428/4a91c05c96a4%2C32903%2C1731414734428.1731414735166
2024-11-12T12:32:15,175 DEBUG [RS:1;4a91c05c96a4:32903 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40051:40051),(127.0.0.1/127.0.0.1:38405:38405),(127.0.0.1/127.0.0.1:35819:35819)]
2024-11-12T12:32:15,324 DEBUG [4a91c05c96a4:33129 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=3, allServersCount=3
2024-11-12T12:32:15,324 DEBUG [4a91c05c96a4:33129 {}] balancer.BalancerClusterState(204): Hosts are {4a91c05c96a4=0} racks are {/default-rack=0}
2024-11-12T12:32:15,327 DEBUG [4a91c05c96a4:33129 {}] balancer.BalancerClusterState(303): server 0 has 0 regions
2024-11-12T12:32:15,327 DEBUG [4a91c05c96a4:33129 {}] balancer.BalancerClusterState(303): server 1 has 0 regions
2024-11-12T12:32:15,327 DEBUG [4a91c05c96a4:33129 {}] balancer.BalancerClusterState(303): server 2 has 0 regions
2024-11-12T12:32:15,327 DEBUG [4a91c05c96a4:33129 {}] balancer.BalancerClusterState(310): server 0 is on host 0
2024-11-12T12:32:15,327 DEBUG [4a91c05c96a4:33129 {}] balancer.BalancerClusterState(310): server 1 is on host 0
2024-11-12T12:32:15,327 DEBUG [4a91c05c96a4:33129 {}] balancer.BalancerClusterState(310): server 2 is on host 0
2024-11-12T12:32:15,327 INFO [4a91c05c96a4:33129 {}] balancer.BalancerClusterState(321): server 0 is on rack 0
2024-11-12T12:32:15,327 INFO [4a91c05c96a4:33129 {}] balancer.BalancerClusterState(321): server 1 is on rack 0
2024-11-12T12:32:15,328 INFO [4a91c05c96a4:33129 {}] balancer.BalancerClusterState(321): server 2 is on rack 0
2024-11-12T12:32:15,328 DEBUG [4a91c05c96a4:33129 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1
2024-11-12T12:32:15,328 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=4a91c05c96a4,41369,1731414734471
2024-11-12T12:32:15,331 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 4a91c05c96a4,41369,1731414734471, state=OPENING
2024-11-12T12:32:15,353 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-11-12T12:32:15,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41369-0x1012f0831330003, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-12T12:32:15,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1012f0831330002, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-12T12:32:15,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-12T12:32:15,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39861-0x1012f0831330001, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-12T12:32:15,443 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-12T12:32:15,443 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-12T12:32:15,443 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-12T12:32:15,443 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-12T12:32:15,443 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=4a91c05c96a4,41369,1731414734471}]
2024-11-12T12:32:15,443 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-12T12:32:15,600 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-11-12T12:32:15,601 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33593, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-11-12T12:32:15,607 INFO [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-11-12T12:32:15,608 INFO [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-12T12:32:15,612 INFO [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4a91c05c96a4%2C41369%2C1731414734471.meta, suffix=.meta, logDir=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/WALs/4a91c05c96a4,41369,1731414734471, archiveDir=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/oldWALs, maxLogs=32
2024-11-12T12:32:15,614 INFO [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 4a91c05c96a4%2C41369%2C1731414734471.meta.1731414735613.meta
2024-11-12T12:32:15,621 INFO [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/WALs/4a91c05c96a4,41369,1731414734471/4a91c05c96a4%2C41369%2C1731414734471.meta.1731414735613.meta
2024-11-12T12:32:15,622 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35819:35819),(127.0.0.1/127.0.0.1:40051:40051),(127.0.0.1/127.0.0.1:38405:38405)]
2024-11-12T12:32:15,623 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-11-12T12:32:15,623 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-11-12T12:32:15,623 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-11-12T12:32:15,624 INFO [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-11-12T12:32:15,624 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-11-12T12:32:15,624 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-12T12:32:15,624 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740
2024-11-12T12:32:15,624 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740
2024-11-12T12:32:15,626 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-12T12:32:15,627 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-12T12:32:15,628 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T12:32:15,628 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-12T12:32:15,628 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-11-12T12:32:15,630 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-11-12T12:32:15,630 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T12:32:15,631 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-12T12:32:15,631 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-11-12T12:32:15,632 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-11-12T12:32:15,633 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T12:32:15,633 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-12T12:32:15,633 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-11-12T12:32:15,635 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-11-12T12:32:15,635 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-12T12:32:15,636 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-12T12:32:15,636 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T12:32:15,637 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/hbase/meta/1588230740 2024-11-12T12:32:15,638 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/hbase/meta/1588230740 2024-11-12T12:32:15,640 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T12:32:15,640 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T12:32:15,640 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T12:32:15,642 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T12:32:15,642 INFO [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62677534, jitterRate=-0.06603196263313293}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T12:32:15,642 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-12T12:32:15,644 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731414735624Writing region info on filesystem at 1731414735624Initializing all the Stores at 1731414735625 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731414735625Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731414735626 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731414735626Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731414735626Cleaning up temporary data from old regions at 1731414735640 (+14 ms)Running coprocessor post-open hooks at 1731414735642 (+2 ms)Region opened successfully at 1731414735643 (+1 ms) 2024-11-12T12:32:15,645 INFO [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731414735599 2024-11-12T12:32:15,648 DEBUG [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-12T12:32:15,648 INFO [RS_OPEN_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-12T12:32:15,649 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=4a91c05c96a4,41369,1731414734471 2024-11-12T12:32:15,651 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 4a91c05c96a4,41369,1731414734471, state=OPEN 2024-11-12T12:32:15,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39861-0x1012f0831330001, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T12:32:15,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1012f0831330002, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T12:32:15,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T12:32:15,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41369-0x1012f0831330003, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T12:32:15,660 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=4a91c05c96a4,41369,1731414734471 2024-11-12T12:32:15,661 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T12:32:15,661 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T12:32:15,661 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T12:32:15,661 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T12:32:15,664 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-12T12:32:15,665 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=4a91c05c96a4,41369,1731414734471 in 217 msec 2024-11-12T12:32:15,669 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-12T12:32:15,669 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 746 msec 2024-11-12T12:32:15,670 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T12:32:15,670 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-12T12:32:15,672 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T12:32:15,672 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4a91c05c96a4,41369,1731414734471, seqNum=-1] 2024-11-12T12:32:15,672 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T12:32:15,674 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42153, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T12:32:15,682 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 815 msec 2024-11-12T12:32:15,682 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731414735682, completionTime=-1 2024-11-12T12:32:15,682 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-12T12:32:15,682 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 
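The records above show the meta location being published to the /hbase/meta-region-server znode and every ZKWatcher on quorum 127.0.0.1:58543 receiving the resulting NodeDataChanged event. A minimal sketch of watching that znode with the plain Apache ZooKeeper client follows; the quorum and path are taken from the log, the payload is an HBase protobuf blob (not plain text), so this only illustrates the watch mechanics.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class MetaZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch changed = new CountDownLatch(1);
    // Quorum address as it appears in the ZKWatcher records above.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:58543", 30000, event -> {
      if (event.getType() == Watcher.Event.EventType.NodeDataChanged
          && "/hbase/meta-region-server".equals(event.getPath())) {
        changed.countDown(); // corresponds to the NodeDataChanged events logged by ZKWatcher
      }
    });
    Stat stat = new Stat();
    // Registers a data watch; the returned bytes encode the meta-hosting ServerName as protobuf.
    byte[] data = zk.getData("/hbase/meta-region-server", true, stat);
    System.out.println("znode version=" + stat.getVersion() + ", payload=" + data.length + " bytes");
    changed.await();
    zk.close();
  }
}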
2024-11-12T12:32:15,684 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-12T12:32:15,684 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731414795684 2024-11-12T12:32:15,685 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731414855684 2024-11-12T12:32:15,685 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-12T12:32:15,685 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4a91c05c96a4,33129,1731414734236-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:15,685 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4a91c05c96a4,33129,1731414734236-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:15,685 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4a91c05c96a4,33129,1731414734236-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:15,685 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-4a91c05c96a4:33129, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:15,685 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:15,685 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:15,688 DEBUG [master/4a91c05c96a4:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-12T12:32:15,690 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.145sec 2024-11-12T12:32:15,691 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-12T12:32:15,691 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-12T12:32:15,691 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-12T12:32:15,691 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-12T12:32:15,691 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-12T12:32:15,691 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4a91c05c96a4,33129,1731414734236-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
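The "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." lines above (and immediately below) are emitted as the master registers its periodic background tasks with a ChoreService. A rough sketch of that pattern, assuming the long-standing public ScheduledChore/ChoreService/Stoppable signatures; the names DemoChore wraps here ("demo-chore", the 1000 ms period) are illustrative only.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  // Minimal Stoppable, standing in for the master/regionserver instance normally passed here.
  static final class SimpleStopper implements Stoppable {
    private volatile boolean stopped;
    @Override public void stop(String why) { stopped = true; }
    @Override public boolean isStopped() { return stopped; }
  }

  public static void main(String[] args) throws Exception {
    SimpleStopper stopper = new SimpleStopper();
    // Period is in milliseconds, matching the "period=..., unit=MILLISECONDS" wording above.
    ScheduledChore chore = new ScheduledChore("demo-chore", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("chore fired"); // the periodic work goes here
      }
    };
    ChoreService service = new ChoreService("demo");
    service.scheduleChore(chore); // produces an "is enabled." record like the ones in this log
    Thread.sleep(3500);
    stopper.stop("done");
    service.shutdown();
  }
}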
2024-11-12T12:32:15,691 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4a91c05c96a4,33129,1731414734236-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-12T12:32:15,694 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-12T12:32:15,694 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-12T12:32:15,694 INFO [master/4a91c05c96a4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4a91c05c96a4,33129,1731414734236-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T12:32:15,702 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a77301a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T12:32:15,702 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 4a91c05c96a4,33129,-1 for getting cluster id 2024-11-12T12:32:15,702 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-12T12:32:15,703 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '3ca4531e-4e56-498f-90f1-bdf21b9fed88' 2024-11-12T12:32:15,704 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-12T12:32:15,704 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "3ca4531e-4e56-498f-90f1-bdf21b9fed88" 2024-11-12T12:32:15,704 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12f45248, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T12:32:15,704 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4a91c05c96a4,33129,-1] 2024-11-12T12:32:15,705 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-12T12:32:15,705 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T12:32:15,707 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52292, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-12T12:32:15,708 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10f5e381, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T12:32:15,709 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T12:32:15,710 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is 
[region=hbase:meta,,1.1588230740, hostname=4a91c05c96a4,41369,1731414734471, seqNum=-1] 2024-11-12T12:32:15,711 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T12:32:15,713 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59996, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T12:32:15,715 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=4a91c05c96a4,33129,1731414734236 2024-11-12T12:32:15,716 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-12T12:32:15,718 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is 4a91c05c96a4,33129,1731414734236 2024-11-12T12:32:15,718 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@9b8af11 2024-11-12T12:32:15,718 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-12T12:32:15,721 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52308, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-12T12:32:15,721 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33129 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T12:32:15,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33129 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-12T12:32:15,725 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-12T12:32:15,725 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T12:32:15,726 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33129 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-12T12:32:15,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33129 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T12:32:15,728 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-12T12:32:15,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35527 is added to blk_1073741837_1013 (size=392) 
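The master record above shows Client=jenkins creating 'TestHBaseWalOnEC' with REGION_REPLICATION => '1' and a single 'cf' family, stored as CreateTableProcedure pid=4. A client-side sketch of an equivalent request through the standard Admin API; the configuration is assumed to point at this minicluster's ZooKeeper quorum.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumed to carry the minicluster's quorum
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Mirrors the descriptor in the log: one region replica, one 'cf' family with default attributes.
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
          .setRegionReplication(1)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .build();
      admin.createTable(td); // drives the CreateTableProcedure (pid=4) recorded above
    }
  }
}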
2024-11-12T12:32:15,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45189 is added to blk_1073741837_1013 (size=392) 2024-11-12T12:32:15,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741837_1013 (size=392) 2024-11-12T12:32:15,739 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ea45ebdce24e3504a599a4407b85affc, NAME => 'TestHBaseWalOnEC,,1731414735721.ea45ebdce24e3504a599a4407b85affc.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c 2024-11-12T12:32:15,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45189 is added to blk_1073741838_1014 (size=51) 2024-11-12T12:32:15,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741838_1014 (size=51) 2024-11-12T12:32:15,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35527 is added to blk_1073741838_1014 (size=51) 2024-11-12T12:32:15,749 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731414735721.ea45ebdce24e3504a599a4407b85affc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T12:32:15,749 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing ea45ebdce24e3504a599a4407b85affc, disabling compactions & flushes 2024-11-12T12:32:15,749 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731414735721.ea45ebdce24e3504a599a4407b85affc. 2024-11-12T12:32:15,749 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731414735721.ea45ebdce24e3504a599a4407b85affc. 2024-11-12T12:32:15,749 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731414735721.ea45ebdce24e3504a599a4407b85affc. after waiting 0 ms 2024-11-12T12:32:15,749 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731414735721.ea45ebdce24e3504a599a4407b85affc. 2024-11-12T12:32:15,749 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731414735721.ea45ebdce24e3504a599a4407b85affc. 
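The region directory for the new table is created under hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/default/TestHBaseWalOnEC, and the addStoredBlock lines show its small metadata files being reported by all three datanodes. Since this test exercises WALs on erasure-coded storage, here is a hedged sketch of inspecting a directory's EC policy and contents with the Hadoop FileSystem API; the NameNode address and path come from the log, and getErasureCodingPolicy returns null for plainly replicated paths.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class TableDirInspectSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:37041"), conf);
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    Path tableDir = new Path(
        "/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/default/TestHBaseWalOnEC");
    // null means the directory is replicated rather than erasure coded.
    ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(tableDir);
    System.out.println("EC policy: " + (policy == null ? "none (replicated)" : policy.getName()));
    for (FileStatus status : dfs.listStatus(tableDir)) {
      System.out.println(status.getPath() + " len=" + status.getLen());
    }
    dfs.close();
  }
}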
2024-11-12T12:32:15,749 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for ea45ebdce24e3504a599a4407b85affc: Waiting for close lock at 1731414735749Disabling compacts and flushes for region at 1731414735749Disabling writes for close at 1731414735749Writing region close event to WAL at 1731414735749Closed at 1731414735749 2024-11-12T12:32:15,751 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-12T12:32:15,752 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731414735721.ea45ebdce24e3504a599a4407b85affc.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731414735752"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731414735752"}]},"ts":"1731414735752"} 2024-11-12T12:32:15,755 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-12T12:32:15,757 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-12T12:32:15,757 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731414735757"}]},"ts":"1731414735757"} 2024-11-12T12:32:15,760 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-12T12:32:15,760 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {4a91c05c96a4=0} racks are {/default-rack=0} 2024-11-12T12:32:15,761 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-12T12:32:15,761 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-12T12:32:15,761 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-12T12:32:15,761 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-12T12:32:15,761 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-12T12:32:15,761 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-12T12:32:15,761 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-12T12:32:15,761 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-12T12:32:15,761 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-12T12:32:15,761 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-12T12:32:15,762 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=ea45ebdce24e3504a599a4407b85affc, ASSIGN}] 2024-11-12T12:32:15,764 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=ea45ebdce24e3504a599a4407b85affc, ASSIGN 2024-11-12T12:32:15,765 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=ea45ebdce24e3504a599a4407b85affc, ASSIGN; state=OFFLINE, location=4a91c05c96a4,39861,1731414734394; forceNewPlan=false, retain=false 2024-11-12T12:32:15,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33129 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T12:32:15,916 INFO [4a91c05c96a4:33129 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-12T12:32:15,916 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ea45ebdce24e3504a599a4407b85affc, regionState=OPENING, regionLocation=4a91c05c96a4,39861,1731414734394 2024-11-12T12:32:15,920 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=ea45ebdce24e3504a599a4407b85affc, ASSIGN because future has completed 2024-11-12T12:32:15,921 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ea45ebdce24e3504a599a4407b85affc, server=4a91c05c96a4,39861,1731414734394}] 2024-11-12T12:32:16,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33129 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T12:32:16,076 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-12T12:32:16,079 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46041, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-12T12:32:16,087 INFO [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731414735721.ea45ebdce24e3504a599a4407b85affc. 
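The assignment above placed region ea45ebdce24e3504a599a4407b85affc on 4a91c05c96a4,39861 and the open is now underway (continued below). Once the region is online, a client can confirm the placement through the RegionLocator API; a short sketch, assuming the standard client classes and the same connection setup as earlier.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class WhereIsMyRegionSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(TableName.valueOf("TestHBaseWalOnEC"))) {
      // Same lookup the AsyncNonMetaRegionLocator performs further down in this log for row='row'.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row"), true /* reload */);
      System.out.println(loc.getRegion().getEncodedName() + " is on " + loc.getServerName());
    }
  }
}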
2024-11-12T12:32:16,087 DEBUG [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => ea45ebdce24e3504a599a4407b85affc, NAME => 'TestHBaseWalOnEC,,1731414735721.ea45ebdce24e3504a599a4407b85affc.', STARTKEY => '', ENDKEY => ''} 2024-11-12T12:32:16,088 DEBUG [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC ea45ebdce24e3504a599a4407b85affc 2024-11-12T12:32:16,088 DEBUG [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731414735721.ea45ebdce24e3504a599a4407b85affc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T12:32:16,088 DEBUG [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for ea45ebdce24e3504a599a4407b85affc 2024-11-12T12:32:16,088 DEBUG [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for ea45ebdce24e3504a599a4407b85affc 2024-11-12T12:32:16,090 INFO [StoreOpener-ea45ebdce24e3504a599a4407b85affc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ea45ebdce24e3504a599a4407b85affc 2024-11-12T12:32:16,092 INFO [StoreOpener-ea45ebdce24e3504a599a4407b85affc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ea45ebdce24e3504a599a4407b85affc columnFamilyName cf 2024-11-12T12:32:16,092 DEBUG [StoreOpener-ea45ebdce24e3504a599a4407b85affc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T12:32:16,092 INFO [StoreOpener-ea45ebdce24e3504a599a4407b85affc-1 {}] regionserver.HStore(327): Store=ea45ebdce24e3504a599a4407b85affc/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T12:32:16,093 DEBUG [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for ea45ebdce24e3504a599a4407b85affc 2024-11-12T12:32:16,093 DEBUG [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/default/TestHBaseWalOnEC/ea45ebdce24e3504a599a4407b85affc 2024-11-12T12:32:16,094 DEBUG 
[RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/default/TestHBaseWalOnEC/ea45ebdce24e3504a599a4407b85affc 2024-11-12T12:32:16,094 DEBUG [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for ea45ebdce24e3504a599a4407b85affc 2024-11-12T12:32:16,095 DEBUG [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for ea45ebdce24e3504a599a4407b85affc 2024-11-12T12:32:16,097 DEBUG [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for ea45ebdce24e3504a599a4407b85affc 2024-11-12T12:32:16,099 DEBUG [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/default/TestHBaseWalOnEC/ea45ebdce24e3504a599a4407b85affc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T12:32:16,100 INFO [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened ea45ebdce24e3504a599a4407b85affc; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67249348, jitterRate=0.002093374729156494}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-12T12:32:16,100 DEBUG [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ea45ebdce24e3504a599a4407b85affc 2024-11-12T12:32:16,101 DEBUG [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for ea45ebdce24e3504a599a4407b85affc: Running coprocessor pre-open hook at 1731414736088Writing region info on filesystem at 1731414736088Initializing all the Stores at 1731414736090 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731414736090Cleaning up temporary data from old regions at 1731414736095 (+5 ms)Running coprocessor post-open hooks at 1731414736100 (+5 ms)Region opened successfully at 1731414736101 (+1 ms) 2024-11-12T12:32:16,102 INFO [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731414735721.ea45ebdce24e3504a599a4407b85affc., pid=6, masterSystemTime=1731414736075 2024-11-12T12:32:16,106 DEBUG [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731414735721.ea45ebdce24e3504a599a4407b85affc. 2024-11-12T12:32:16,106 INFO [RS_OPEN_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731414735721.ea45ebdce24e3504a599a4407b85affc. 
2024-11-12T12:32:16,107 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ea45ebdce24e3504a599a4407b85affc, regionState=OPEN, openSeqNum=2, regionLocation=4a91c05c96a4,39861,1731414734394 2024-11-12T12:32:16,110 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ea45ebdce24e3504a599a4407b85affc, server=4a91c05c96a4,39861,1731414734394 because future has completed 2024-11-12T12:32:16,116 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-12T12:32:16,116 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure ea45ebdce24e3504a599a4407b85affc, server=4a91c05c96a4,39861,1731414734394 in 191 msec 2024-11-12T12:32:16,120 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-12T12:32:16,120 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=ea45ebdce24e3504a599a4407b85affc, ASSIGN in 354 msec 2024-11-12T12:32:16,121 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-12T12:32:16,121 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731414736121"}]},"ts":"1731414736121"} 2024-11-12T12:32:16,124 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-12T12:32:16,126 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-12T12:32:16,129 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 404 msec 2024-11-12T12:32:16,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33129 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T12:32:16,358 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-12T12:32:16,358 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-12T12:32:16,358 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-12T12:32:16,365 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-12T12:32:16,365 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-12T12:32:16,365 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
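With the table assigned, the test writes a single cell and asks the master to flush the table; the cell key "row/cf:cq" and the 32 B data size appear in the flush records below, and the flush request becomes FlushTableProcedure pid=7. An equivalent client-side sequence, sketched with the standard Table and Admin APIs; the actual value bytes are not recoverable from the log, so the value here is a stand-in.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAndFlushSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("TestHBaseWalOnEC");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(tn);
         Admin admin = conn.getAdmin()) {
      // Row, family and qualifier match the flushed cell key ("key is row/cf:cq") seen below.
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
      // Triggers the FlushTableProcedure (pid=7) / FlushRegionProcedure (pid=8) recorded below.
      admin.flush(tn);
    }
  }
}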
2024-11-12T12:32:16,369 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731414735721.ea45ebdce24e3504a599a4407b85affc., hostname=4a91c05c96a4,39861,1731414734394, seqNum=2] 2024-11-12T12:32:16,369 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T12:32:16,372 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33744, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T12:32:16,375 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33129 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestHBaseWalOnEC 2024-11-12T12:32:16,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33129 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-12T12:32:16,378 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-12T12:32:16,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33129 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T12:32:16,378 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-12T12:32:16,378 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-12T12:32:16,380 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T12:32:16,380 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T12:32:16,382 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-12T12:32:16,382 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-12T12:32:16,383 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-12T12:32:16,383 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-12T12:32:16,385 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_default_table_TestHBaseWalOnEC 2024-11-12T12:32:16,385 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestHBaseWalOnEC Metrics about Tables on a single HBase RegionServer 2024-11-12T12:32:16,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33129 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T12:32:16,535 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39861 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-12T12:32:16,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731414735721.ea45ebdce24e3504a599a4407b85affc. 2024-11-12T12:32:16,536 INFO [RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing ea45ebdce24e3504a599a4407b85affc 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-12T12:32:16,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/default/TestHBaseWalOnEC/ea45ebdce24e3504a599a4407b85affc/.tmp/cf/c4c9ef69d9f84b9caab12ad82992606e is 36, key is row/cf:cq/1731414736372/Put/seqid=0 2024-11-12T12:32:16,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741839_1015 (size=4787) 2024-11-12T12:32:16,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45189 is added to blk_1073741839_1015 (size=4787) 2024-11-12T12:32:16,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35527 is added to blk_1073741839_1015 (size=4787) 2024-11-12T12:32:16,559 INFO [RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/default/TestHBaseWalOnEC/ea45ebdce24e3504a599a4407b85affc/.tmp/cf/c4c9ef69d9f84b9caab12ad82992606e 2024-11-12T12:32:16,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/default/TestHBaseWalOnEC/ea45ebdce24e3504a599a4407b85affc/.tmp/cf/c4c9ef69d9f84b9caab12ad82992606e as hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/default/TestHBaseWalOnEC/ea45ebdce24e3504a599a4407b85affc/cf/c4c9ef69d9f84b9caab12ad82992606e 2024-11-12T12:32:16,575 INFO [RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/default/TestHBaseWalOnEC/ea45ebdce24e3504a599a4407b85affc/cf/c4c9ef69d9f84b9caab12ad82992606e, entries=1, sequenceid=5, filesize=4.7 K 2024-11-12T12:32:16,577 INFO [RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0-0 
{event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for ea45ebdce24e3504a599a4407b85affc in 41ms, sequenceid=5, compaction requested=false 2024-11-12T12:32:16,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for ea45ebdce24e3504a599a4407b85affc: 2024-11-12T12:32:16,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731414735721.ea45ebdce24e3504a599a4407b85affc. 2024-11-12T12:32:16,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4a91c05c96a4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-12T12:32:16,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33129 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-12T12:32:16,583 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-12T12:32:16,583 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 200 msec 2024-11-12T12:32:16,587 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 209 msec 2024-11-12T12:32:16,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33129 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T12:32:16,697 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-12T12:32:16,701 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-12T12:32:16,701 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
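The flush above committed a ~4.7 K store file for ea45ebdce24e3504a599a4407b85affc, so before the minicluster shutdown that starts here, the written cell is served from that file rather than the memstore. A minimal read-back sketch with the client Get API, assuming the same connection setup as the earlier sketches.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadBackSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestHBaseWalOnEC"))) {
      // After the flush this read is answered from the new store file, not the memstore.
      Result result = table.get(new Get(Bytes.toBytes("row")));
      byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("cq"));
      System.out.println("cf:cq = " + (value == null ? "<missing>" : Bytes.toString(value)));
    }
  }
}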
2024-11-12T12:32:16,701 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T12:32:16,701 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
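The call stack above shows the close originating in TestHBaseWalOnEC.tearDown(), which goes through HBaseTestingUtil.shutdownMiniCluster(). A skeletal JUnit 4 shape of that lifecycle, assuming HBaseTestingUtil keeps the startMiniCluster(int)/shutdownMiniCluster() methods visible in the trace; the per-test setup here is illustrative (the real test may use class-level setup and additionally configures erasure coding for the WAL storage, which is omitted).

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Three region servers, matching "Finished waiting on RegionServer count=3" earlier in the log.
    util.startMiniCluster(3);
  }

  @After
  public void tearDown() throws Exception {
    // Produces the "Shutting down minicluster" / "***** STOPPING region server ..." records below.
    util.shutdownMiniCluster();
  }

  @Test
  public void smoke() throws Exception {
    // table creation, put, flush and read-back would go here, as sketched earlier in this log
  }
}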
2024-11-12T12:32:16,701 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T12:32:16,701 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-12T12:32:16,701 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-12T12:32:16,702 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1802502909, stopped=false 2024-11-12T12:32:16,702 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=4a91c05c96a4,33129,1731414734236 2024-11-12T12:32:16,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T12:32:16,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39861-0x1012f0831330001, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T12:32:16,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1012f0831330002, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T12:32:16,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41369-0x1012f0831330003, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T12:32:16,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:16,766 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T12:32:16,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39861-0x1012f0831330001, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:16,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1012f0831330002, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:16,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41369-0x1012f0831330003, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:16,766 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-12T12:32:16,767 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T12:32:16,767 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T12:32:16,767 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:32903-0x1012f0831330002, quorum=127.0.0.1:58543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T12:32:16,767 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T12:32:16,767 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39861-0x1012f0831330001, quorum=127.0.0.1:58543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T12:32:16,767 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '4a91c05c96a4,39861,1731414734394' ***** 2024-11-12T12:32:16,767 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41369-0x1012f0831330003, quorum=127.0.0.1:58543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T12:32:16,767 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T12:32:16,767 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '4a91c05c96a4,32903,1731414734428' ***** 2024-11-12T12:32:16,768 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T12:32:16,768 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '4a91c05c96a4,41369,1731414734471' ***** 2024-11-12T12:32:16,768 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T12:32:16,768 INFO [RS:0;4a91c05c96a4:39861 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T12:32:16,768 INFO [RS:1;4a91c05c96a4:32903 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T12:32:16,768 INFO [RS:0;4a91c05c96a4:39861 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T12:32:16,768 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T12:32:16,768 INFO [RS:0;4a91c05c96a4:39861 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-12T12:32:16,768 INFO [RS:1;4a91c05c96a4:32903 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T12:32:16,768 INFO [RS:0;4a91c05c96a4:39861 {}] regionserver.HRegionServer(3091): Received CLOSE for ea45ebdce24e3504a599a4407b85affc 2024-11-12T12:32:16,768 INFO [RS:1;4a91c05c96a4:32903 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-12T12:32:16,769 INFO [RS:1;4a91c05c96a4:32903 {}] regionserver.HRegionServer(959): stopping server 4a91c05c96a4,32903,1731414734428 2024-11-12T12:32:16,769 INFO [RS:2;4a91c05c96a4:41369 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T12:32:16,769 INFO [RS:1;4a91c05c96a4:32903 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T12:32:16,769 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T12:32:16,769 INFO [RS:2;4a91c05c96a4:41369 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T12:32:16,769 INFO [RS:1;4a91c05c96a4:32903 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;4a91c05c96a4:32903. 2024-11-12T12:32:16,769 INFO [RS:2;4a91c05c96a4:41369 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-12T12:32:16,769 INFO [RS:0;4a91c05c96a4:39861 {}] regionserver.HRegionServer(959): stopping server 4a91c05c96a4,39861,1731414734394 2024-11-12T12:32:16,769 INFO [RS:2;4a91c05c96a4:41369 {}] regionserver.HRegionServer(959): stopping server 4a91c05c96a4,41369,1731414734471 2024-11-12T12:32:16,769 INFO [RS:0;4a91c05c96a4:39861 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T12:32:16,769 DEBUG [RS:1;4a91c05c96a4:32903 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T12:32:16,769 INFO [RS:2;4a91c05c96a4:41369 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T12:32:16,769 DEBUG [RS:1;4a91c05c96a4:32903 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T12:32:16,769 INFO [RS:0;4a91c05c96a4:39861 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;4a91c05c96a4:39861. 2024-11-12T12:32:16,769 INFO [RS:2;4a91c05c96a4:41369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;4a91c05c96a4:41369. 2024-11-12T12:32:16,770 INFO [RS:1;4a91c05c96a4:32903 {}] regionserver.HRegionServer(976): stopping server 4a91c05c96a4,32903,1731414734428; all regions closed. 
2024-11-12T12:32:16,770 DEBUG [RS:0;4a91c05c96a4:39861 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T12:32:16,770 DEBUG [RS:0;4a91c05c96a4:39861 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T12:32:16,770 DEBUG [RS_CLOSE_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing ea45ebdce24e3504a599a4407b85affc, disabling compactions & flushes 2024-11-12T12:32:16,770 DEBUG [RS:2;4a91c05c96a4:41369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T12:32:16,770 INFO [RS_CLOSE_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731414735721.ea45ebdce24e3504a599a4407b85affc. 
2024-11-12T12:32:16,770 DEBUG [RS:2;4a91c05c96a4:41369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T12:32:16,770 DEBUG [RS_CLOSE_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731414735721.ea45ebdce24e3504a599a4407b85affc. 2024-11-12T12:32:16,770 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T12:32:16,770 DEBUG [RS_CLOSE_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731414735721.ea45ebdce24e3504a599a4407b85affc. after waiting 0 ms 2024-11-12T12:32:16,770 INFO [RS:0;4a91c05c96a4:39861 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-12T12:32:16,770 INFO [RS:2;4a91c05c96a4:41369 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T12:32:16,770 DEBUG [RS:0;4a91c05c96a4:39861 {}] regionserver.HRegionServer(1325): Online Regions={ea45ebdce24e3504a599a4407b85affc=TestHBaseWalOnEC,,1731414735721.ea45ebdce24e3504a599a4407b85affc.} 2024-11-12T12:32:16,770 DEBUG [RS_CLOSE_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731414735721.ea45ebdce24e3504a599a4407b85affc. 2024-11-12T12:32:16,770 INFO [RS:2;4a91c05c96a4:41369 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T12:32:16,770 DEBUG [RS:0;4a91c05c96a4:39861 {}] regionserver.HRegionServer(1351): Waiting on ea45ebdce24e3504a599a4407b85affc 2024-11-12T12:32:16,770 INFO [RS:2;4a91c05c96a4:41369 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-12T12:32:16,771 INFO [RS:2;4a91c05c96a4:41369 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-12T12:32:16,771 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T12:32:16,771 INFO [RS:2;4a91c05c96a4:41369 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-12T12:32:16,771 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T12:32:16,771 DEBUG [RS:2;4a91c05c96a4:41369 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-12T12:32:16,771 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T12:32:16,771 DEBUG [RS:2;4a91c05c96a4:41369 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-12T12:32:16,771 DEBUG [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-12T12:32:16,771 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T12:32:16,771 INFO [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-12T12:32:16,771 DEBUG [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-12T12:32:16,771 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T12:32:16,772 DEBUG [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T12:32:16,772 DEBUG [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T12:32:16,772 INFO [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-12T12:32:16,777 DEBUG [RS_CLOSE_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/default/TestHBaseWalOnEC/ea45ebdce24e3504a599a4407b85affc/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-12T12:32:16,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741835_1011 (size=93) 2024-11-12T12:32:16,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45189 is added to blk_1073741835_1011 (size=93) 2024-11-12T12:32:16,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35527 is added to blk_1073741835_1011 (size=93) 2024-11-12T12:32:16,779 INFO [RS_CLOSE_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731414735721.ea45ebdce24e3504a599a4407b85affc. 
2024-11-12T12:32:16,780 DEBUG [RS_CLOSE_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for ea45ebdce24e3504a599a4407b85affc: Waiting for close lock at 1731414736770Running coprocessor pre-close hooks at 1731414736770Disabling compacts and flushes for region at 1731414736770Disabling writes for close at 1731414736770Writing region close event to WAL at 1731414736772 (+2 ms)Running coprocessor post-close hooks at 1731414736779 (+7 ms)Closed at 1731414736779 2024-11-12T12:32:16,780 DEBUG [RS_CLOSE_REGION-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731414735721.ea45ebdce24e3504a599a4407b85affc. 2024-11-12T12:32:16,782 DEBUG [RS:1;4a91c05c96a4:32903 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/oldWALs 2024-11-12T12:32:16,782 INFO [RS:1;4a91c05c96a4:32903 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 4a91c05c96a4%2C32903%2C1731414734428:(num 1731414735166) 2024-11-12T12:32:16,782 DEBUG [RS:1;4a91c05c96a4:32903 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T12:32:16,782 INFO [RS:1;4a91c05c96a4:32903 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T12:32:16,782 INFO [RS:1;4a91c05c96a4:32903 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T12:32:16,782 INFO [RS:1;4a91c05c96a4:32903 {}] hbase.ChoreService(370): Chore service for: regionserver/4a91c05c96a4:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-12T12:32:16,782 INFO [RS:1;4a91c05c96a4:32903 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T12:32:16,782 INFO [RS:1;4a91c05c96a4:32903 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T12:32:16,782 INFO [regionserver/4a91c05c96a4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-12T12:32:16,782 INFO [RS:1;4a91c05c96a4:32903 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-12T12:32:16,782 INFO [RS:1;4a91c05c96a4:32903 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T12:32:16,783 INFO [RS:1;4a91c05c96a4:32903 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:32903 2024-11-12T12:32:16,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T12:32:16,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1012f0831330002, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/4a91c05c96a4,32903,1731414734428 2024-11-12T12:32:16,795 INFO [RS:1;4a91c05c96a4:32903 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T12:32:16,798 DEBUG [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/hbase/meta/1588230740/.tmp/info/ab43939061644c11a28e4c38e37f39ed is 153, key is TestHBaseWalOnEC,,1731414735721.ea45ebdce24e3504a599a4407b85affc./info:regioninfo/1731414736107/Put/seqid=0 2024-11-12T12:32:16,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35527 is added to blk_1073741840_1016 (size=6637) 2024-11-12T12:32:16,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741840_1016 (size=6637) 2024-11-12T12:32:16,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45189 is added to blk_1073741840_1016 (size=6637) 2024-11-12T12:32:16,806 INFO [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/hbase/meta/1588230740/.tmp/info/ab43939061644c11a28e4c38e37f39ed 2024-11-12T12:32:16,808 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [4a91c05c96a4,32903,1731414734428] 2024-11-12T12:32:16,818 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/4a91c05c96a4,32903,1731414734428 already deleted, retry=false 2024-11-12T12:32:16,818 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 4a91c05c96a4,32903,1731414734428 expired; onlineServers=2 2024-11-12T12:32:16,819 INFO [regionserver/4a91c05c96a4:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T12:32:16,829 INFO [regionserver/4a91c05c96a4:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T12:32:16,830 DEBUG [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/hbase/meta/1588230740/.tmp/ns/96e68128e55049ffa958d66433e720e3 is 43, key is default/ns:d/1731414735674/Put/seqid=0 2024-11-12T12:32:16,834 INFO [regionserver/4a91c05c96a4:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T12:32:16,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:45189 is added to blk_1073741841_1017 (size=5153) 2024-11-12T12:32:16,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35527 is added to blk_1073741841_1017 (size=5153) 2024-11-12T12:32:16,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741841_1017 (size=5153) 2024-11-12T12:32:16,837 INFO [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/hbase/meta/1588230740/.tmp/ns/96e68128e55049ffa958d66433e720e3 2024-11-12T12:32:16,860 DEBUG [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/hbase/meta/1588230740/.tmp/table/624516fce2914dde9d697173ddb4d8be is 52, key is TestHBaseWalOnEC/table:state/1731414736121/Put/seqid=0 2024-11-12T12:32:16,861 WARN [IPC Server handler 0 on default port 37041 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-12T12:32:16,861 WARN [IPC Server handler 0 on default port 37041 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-12T12:32:16,862 WARN [IPC Server handler 0 on default port 37041 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-12T12:32:16,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45189 is added to blk_1073741842_1018 (size=5249) 2024-11-12T12:32:16,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741842_1018 (size=5249) 2024-11-12T12:32:16,867 INFO [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/hbase/meta/1588230740/.tmp/table/624516fce2914dde9d697173ddb4d8be 2024-11-12T12:32:16,876 DEBUG [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/hbase/meta/1588230740/.tmp/info/ab43939061644c11a28e4c38e37f39ed as hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/hbase/meta/1588230740/info/ab43939061644c11a28e4c38e37f39ed 2024-11-12T12:32:16,884 INFO [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/hbase/meta/1588230740/info/ab43939061644c11a28e4c38e37f39ed, entries=10, sequenceid=11, filesize=6.5 K 2024-11-12T12:32:16,886 DEBUG [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/hbase/meta/1588230740/.tmp/ns/96e68128e55049ffa958d66433e720e3 as hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/hbase/meta/1588230740/ns/96e68128e55049ffa958d66433e720e3 2024-11-12T12:32:16,893 INFO [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/hbase/meta/1588230740/ns/96e68128e55049ffa958d66433e720e3, entries=2, sequenceid=11, filesize=5.0 K 2024-11-12T12:32:16,895 DEBUG [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/hbase/meta/1588230740/.tmp/table/624516fce2914dde9d697173ddb4d8be as hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/hbase/meta/1588230740/table/624516fce2914dde9d697173ddb4d8be 2024-11-12T12:32:16,902 INFO [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/hbase/meta/1588230740/table/624516fce2914dde9d697173ddb4d8be, entries=2, sequenceid=11, filesize=5.1 K 2024-11-12T12:32:16,904 INFO [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 132ms, sequenceid=11, compaction requested=false 2024-11-12T12:32:16,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1012f0831330002, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T12:32:16,908 INFO [RS:1;4a91c05c96a4:32903 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T12:32:16,908 INFO [RS:1;4a91c05c96a4:32903 {}] regionserver.HRegionServer(1031): Exiting; stopping=4a91c05c96a4,32903,1731414734428; zookeeper connection closed. 
2024-11-12T12:32:16,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1012f0831330002, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T12:32:16,908 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5f59f802 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5f59f802 2024-11-12T12:32:16,910 DEBUG [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-12T12:32:16,910 DEBUG [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-12T12:32:16,910 INFO [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-12T12:32:16,911 DEBUG [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731414736771Running coprocessor pre-close hooks at 1731414736771Disabling compacts and flushes for region at 1731414736771Disabling writes for close at 1731414736772 (+1 ms)Obtaining lock to block concurrent updates at 1731414736772Preparing flush snapshotting stores in 1588230740 at 1731414736772Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731414736773 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731414736774 (+1 ms)Flushing 1588230740/info: creating writer at 1731414736774Flushing 1588230740/info: appending metadata at 1731414736798 (+24 ms)Flushing 1588230740/info: closing flushed file at 1731414736798Flushing 1588230740/ns: creating writer at 1731414736814 (+16 ms)Flushing 1588230740/ns: appending metadata at 1731414736829 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731414736829Flushing 1588230740/table: creating writer at 1731414736844 (+15 ms)Flushing 1588230740/table: appending metadata at 1731414736859 (+15 ms)Flushing 1588230740/table: closing flushed file at 1731414736859Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d7e67f2: reopening flushed file at 1731414736874 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@542de3c: reopening flushed file at 1731414736884 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@118a280c: reopening flushed file at 1731414736893 (+9 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 132ms, sequenceid=11, compaction requested=false at 1731414736904 (+11 ms)Writing region close event to WAL at 1731414736905 (+1 ms)Running coprocessor post-close hooks at 1731414736910 (+5 ms)Closed at 1731414736910 2024-11-12T12:32:16,911 DEBUG [RS_CLOSE_META-regionserver/4a91c05c96a4:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-12T12:32:16,971 INFO [RS:0;4a91c05c96a4:39861 {}] regionserver.HRegionServer(976): stopping server 4a91c05c96a4,39861,1731414734394; all regions closed. 
2024-11-12T12:32:16,971 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T12:32:16,971 INFO [RS:2;4a91c05c96a4:41369 {}] regionserver.HRegionServer(976): stopping server 4a91c05c96a4,41369,1731414734471; all regions closed. 2024-11-12T12:32:16,971 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T12:32:16,972 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T12:32:16,972 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T12:32:16,972 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T12:32:16,972 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T12:32:16,972 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T12:32:16,972 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T12:32:16,972 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T12:32:16,972 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T12:32:16,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45189 is added to blk_1073741834_1010 (size=1298) 2024-11-12T12:32:16,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35527 is added to blk_1073741834_1010 (size=1298) 2024-11-12T12:32:16,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741834_1010 (size=1298) 2024-11-12T12:32:16,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45189 is added to blk_1073741836_1012 (size=2751) 2024-11-12T12:32:16,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741836_1012 (size=2751) 2024-11-12T12:32:16,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35527 is added to blk_1073741836_1012 (size=2751) 2024-11-12T12:32:16,979 DEBUG [RS:0;4a91c05c96a4:39861 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/oldWALs 2024-11-12T12:32:16,979 DEBUG [RS:2;4a91c05c96a4:41369 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/oldWALs 2024-11-12T12:32:16,979 INFO [RS:2;4a91c05c96a4:41369 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 4a91c05c96a4%2C41369%2C1731414734471.meta:.meta(num 1731414735613) 2024-11-12T12:32:16,979 INFO [RS:0;4a91c05c96a4:39861 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 4a91c05c96a4%2C39861%2C1731414734394:(num 1731414735162) 2024-11-12T12:32:16,979 DEBUG [RS:0;4a91c05c96a4:39861 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T12:32:16,979 INFO [RS:0;4a91c05c96a4:39861 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T12:32:16,979 INFO [RS:0;4a91c05c96a4:39861 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T12:32:16,980 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T12:32:16,980 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T12:32:16,980 INFO [RS:0;4a91c05c96a4:39861 {}] hbase.ChoreService(370): Chore service for: regionserver/4a91c05c96a4:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, 
period=60000, unit=MILLISECONDS] on shutdown 2024-11-12T12:32:16,980 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T12:32:16,980 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T12:32:16,980 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T12:32:16,980 INFO [RS:0;4a91c05c96a4:39861 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T12:32:16,980 INFO [regionserver/4a91c05c96a4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-12T12:32:16,980 INFO [RS:0;4a91c05c96a4:39861 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T12:32:16,980 INFO [RS:0;4a91c05c96a4:39861 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-12T12:32:16,980 INFO [RS:0;4a91c05c96a4:39861 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T12:32:16,980 INFO [RS:0;4a91c05c96a4:39861 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:39861 2024-11-12T12:32:16,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45189 is added to blk_1073741833_1009 (size=93) 2024-11-12T12:32:16,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741833_1009 (size=93) 2024-11-12T12:32:16,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35527 is added to blk_1073741833_1009 (size=93) 2024-11-12T12:32:16,986 DEBUG [RS:2;4a91c05c96a4:41369 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/oldWALs 2024-11-12T12:32:16,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39861-0x1012f0831330001, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/4a91c05c96a4,39861,1731414734394 2024-11-12T12:32:16,987 INFO [RS:0;4a91c05c96a4:39861 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T12:32:16,987 INFO [RS:2;4a91c05c96a4:41369 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 4a91c05c96a4%2C41369%2C1731414734471:(num 1731414735156) 2024-11-12T12:32:16,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T12:32:16,987 DEBUG [RS:2;4a91c05c96a4:41369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T12:32:16,987 INFO [RS:2;4a91c05c96a4:41369 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T12:32:16,987 INFO [RS:2;4a91c05c96a4:41369 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T12:32:16,987 INFO [RS:2;4a91c05c96a4:41369 {}] hbase.ChoreService(370): Chore service for: regionserver/4a91c05c96a4:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-12T12:32:16,987 INFO [RS:2;4a91c05c96a4:41369 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T12:32:16,987 INFO [regionserver/4a91c05c96a4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-12T12:32:16,988 INFO [RS:2;4a91c05c96a4:41369 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:41369 2024-11-12T12:32:16,997 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [4a91c05c96a4,39861,1731414734394] 2024-11-12T12:32:17,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41369-0x1012f0831330003, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/4a91c05c96a4,41369,1731414734471 2024-11-12T12:32:17,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T12:32:17,008 INFO [RS:2;4a91c05c96a4:41369 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T12:32:17,018 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/4a91c05c96a4,39861,1731414734394 already deleted, retry=false 2024-11-12T12:32:17,018 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 4a91c05c96a4,39861,1731414734394 expired; onlineServers=1 2024-11-12T12:32:17,029 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [4a91c05c96a4,41369,1731414734471] 2024-11-12T12:32:17,039 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/4a91c05c96a4,41369,1731414734471 already deleted, retry=false 2024-11-12T12:32:17,039 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 4a91c05c96a4,41369,1731414734471 expired; onlineServers=0 2024-11-12T12:32:17,039 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '4a91c05c96a4,33129,1731414734236' ***** 2024-11-12T12:32:17,039 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-12T12:32:17,039 INFO [M:0;4a91c05c96a4:33129 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T12:32:17,039 INFO [M:0;4a91c05c96a4:33129 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T12:32:17,040 DEBUG [M:0;4a91c05c96a4:33129 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-12T12:32:17,040 DEBUG [M:0;4a91c05c96a4:33129 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-12T12:32:17,040 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-12T12:32:17,040 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster-HFileCleaner.large.0-1731414734872 {}] cleaner.HFileCleaner(306): Exit Thread[master/4a91c05c96a4:0:becomeActiveMaster-HFileCleaner.large.0-1731414734872,5,FailOnTimeoutGroup] 2024-11-12T12:32:17,040 DEBUG [master/4a91c05c96a4:0:becomeActiveMaster-HFileCleaner.small.0-1731414734873 {}] cleaner.HFileCleaner(306): Exit Thread[master/4a91c05c96a4:0:becomeActiveMaster-HFileCleaner.small.0-1731414734873,5,FailOnTimeoutGroup] 2024-11-12T12:32:17,040 INFO [M:0;4a91c05c96a4:33129 {}] hbase.ChoreService(370): Chore service for: master/4a91c05c96a4:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-12T12:32:17,040 INFO [M:0;4a91c05c96a4:33129 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T12:32:17,040 DEBUG [M:0;4a91c05c96a4:33129 {}] master.HMaster(1795): Stopping service threads 2024-11-12T12:32:17,040 INFO [M:0;4a91c05c96a4:33129 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-12T12:32:17,040 INFO [M:0;4a91c05c96a4:33129 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T12:32:17,041 INFO [M:0;4a91c05c96a4:33129 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-12T12:32:17,041 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-12T12:32:17,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-12T12:32:17,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T12:32:17,050 DEBUG [M:0;4a91c05c96a4:33129 {}] zookeeper.ZKUtil(347): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-12T12:32:17,050 WARN [M:0;4a91c05c96a4:33129 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-12T12:32:17,051 INFO [M:0;4a91c05c96a4:33129 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/.lastflushedseqids 2024-11-12T12:32:17,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45189 is added to blk_1073741843_1019 (size=127) 2024-11-12T12:32:17,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741843_1019 (size=127) 2024-11-12T12:32:17,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35527 is added to blk_1073741843_1019 (size=127) 2024-11-12T12:32:17,059 INFO [M:0;4a91c05c96a4:33129 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-12T12:32:17,060 INFO [M:0;4a91c05c96a4:33129 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-12T12:32:17,060 DEBUG 
[M:0;4a91c05c96a4:33129 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T12:32:17,060 INFO [M:0;4a91c05c96a4:33129 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T12:32:17,060 DEBUG [M:0;4a91c05c96a4:33129 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T12:32:17,060 DEBUG [M:0;4a91c05c96a4:33129 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-12T12:32:17,060 DEBUG [M:0;4a91c05c96a4:33129 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T12:32:17,060 INFO [M:0;4a91c05c96a4:33129 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.83 KB heapSize=34.12 KB 2024-11-12T12:32:17,077 DEBUG [M:0;4a91c05c96a4:33129 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/64f4b3c817604d37bae2a9c3e6fcafa9 is 82, key is hbase:meta,,1/info:regioninfo/1731414735649/Put/seqid=0 2024-11-12T12:32:17,079 WARN [IPC Server handler 1 on default port 37041 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-12T12:32:17,079 WARN [IPC Server handler 1 on default port 37041 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-12T12:32:17,079 WARN [IPC Server handler 1 on default port 37041 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-12T12:32:17,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45189 is added to blk_1073741844_1020 (size=5672) 2024-11-12T12:32:17,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741844_1020 (size=5672) 2024-11-12T12:32:17,085 INFO [M:0;4a91c05c96a4:33129 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/64f4b3c817604d37bae2a9c3e6fcafa9 2024-11-12T12:32:17,097 INFO [RS:0;4a91c05c96a4:39861 {}] 
hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T12:32:17,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39861-0x1012f0831330001, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T12:32:17,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39861-0x1012f0831330001, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T12:32:17,097 INFO [RS:0;4a91c05c96a4:39861 {}] regionserver.HRegionServer(1031): Exiting; stopping=4a91c05c96a4,39861,1731414734394; zookeeper connection closed. 2024-11-12T12:32:17,098 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@a05b81d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@a05b81d 2024-11-12T12:32:17,113 DEBUG [M:0;4a91c05c96a4:33129 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c6b79b19178c45de926f92a031854498 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731414736128/Put/seqid=0 2024-11-12T12:32:17,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45189 is added to blk_1073741845_1021 (size=6439) 2024-11-12T12:32:17,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741845_1021 (size=6439) 2024-11-12T12:32:17,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35527 is added to blk_1073741845_1021 (size=6439) 2024-11-12T12:32:17,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41369-0x1012f0831330003, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T12:32:17,129 INFO [RS:2;4a91c05c96a4:41369 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T12:32:17,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41369-0x1012f0831330003, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T12:32:17,129 INFO [RS:2;4a91c05c96a4:41369 {}] regionserver.HRegionServer(1031): Exiting; stopping=4a91c05c96a4,41369,1731414734471; zookeeper connection closed. 
2024-11-12T12:32:17,129 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@653553ee {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@653553ee 2024-11-12T12:32:17,130 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-12T12:32:17,524 INFO [M:0;4a91c05c96a4:33129 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c6b79b19178c45de926f92a031854498 2024-11-12T12:32:17,552 DEBUG [M:0;4a91c05c96a4:33129 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/206fbd745eeb44c89fe3e9cbb817931c is 69, key is 4a91c05c96a4,32903,1731414734428/rs:state/1731414734956/Put/seqid=0 2024-11-12T12:32:17,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45189 is added to blk_1073741846_1022 (size=5294) 2024-11-12T12:32:17,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35527 is added to blk_1073741846_1022 (size=5294) 2024-11-12T12:32:17,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741846_1022 (size=5294) 2024-11-12T12:32:17,559 INFO [M:0;4a91c05c96a4:33129 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/206fbd745eeb44c89fe3e9cbb817931c 2024-11-12T12:32:17,565 DEBUG [M:0;4a91c05c96a4:33129 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/64f4b3c817604d37bae2a9c3e6fcafa9 as hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/64f4b3c817604d37bae2a9c3e6fcafa9 2024-11-12T12:32:17,572 INFO [M:0;4a91c05c96a4:33129 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/64f4b3c817604d37bae2a9c3e6fcafa9, entries=8, sequenceid=72, filesize=5.5 K 2024-11-12T12:32:17,573 DEBUG [M:0;4a91c05c96a4:33129 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c6b79b19178c45de926f92a031854498 as hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c6b79b19178c45de926f92a031854498 2024-11-12T12:32:17,581 INFO [M:0;4a91c05c96a4:33129 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c6b79b19178c45de926f92a031854498, entries=8, sequenceid=72, filesize=6.3 K 2024-11-12T12:32:17,582 DEBUG [M:0;4a91c05c96a4:33129 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/206fbd745eeb44c89fe3e9cbb817931c as hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/206fbd745eeb44c89fe3e9cbb817931c 2024-11-12T12:32:17,589 INFO [M:0;4a91c05c96a4:33129 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37041/user/jenkins/test-data/d550b2d8-8158-ab52-365f-09e3849d391c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/206fbd745eeb44c89fe3e9cbb817931c, entries=3, sequenceid=72, filesize=5.2 K 2024-11-12T12:32:17,591 INFO [M:0;4a91c05c96a4:33129 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 531ms, sequenceid=72, compaction requested=false 2024-11-12T12:32:17,592 INFO [M:0;4a91c05c96a4:33129 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T12:32:17,592 DEBUG [M:0;4a91c05c96a4:33129 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731414737060Disabling compacts and flushes for region at 1731414737060Disabling writes for close at 1731414737060Obtaining lock to block concurrent updates at 1731414737060Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731414737060Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27471, getHeapSize=34872, getOffHeapSize=0, getCellsCount=85 at 1731414737061 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731414737062 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731414737062Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731414737077 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731414737077Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731414737091 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731414737113 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731414737113Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731414737538 (+425 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731414737552 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731414737552Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5d1f7dbb: reopening flushed file at 1731414737564 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@596d737a: reopening flushed file at 1731414737572 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@ea81b65: reopening flushed file at 1731414737581 (+9 ms)Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 531ms, sequenceid=72, compaction requested=false at 1731414737591 (+10 ms)Writing region close event to WAL at 1731414737592 (+1 ms)Closed at 1731414737592 2024-11-12T12:32:17,593 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T12:32:17,593 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T12:32:17,593 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T12:32:17,593 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T12:32:17,593 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T12:32:17,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741830_1006 (size=32674) 2024-11-12T12:32:17,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35527 is added to blk_1073741830_1006 (size=32674) 2024-11-12T12:32:17,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45189 is added to blk_1073741830_1006 (size=32674) 2024-11-12T12:32:17,596 INFO [M:0;4a91c05c96a4:33129 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-12T12:32:17,596 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-12T12:32:17,597 INFO [M:0;4a91c05c96a4:33129 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:33129 2024-11-12T12:32:17,597 INFO [M:0;4a91c05c96a4:33129 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T12:32:17,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T12:32:17,739 INFO [M:0;4a91c05c96a4:33129 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T12:32:17,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33129-0x1012f0831330000, quorum=127.0.0.1:58543, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T12:32:17,744 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1183a3bb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T12:32:17,745 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6ee4ec12{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T12:32:17,746 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T12:32:17,746 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@73f6422f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T12:32:17,747 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@38da8210{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/hadoop.log.dir/,STOPPED} 2024-11-12T12:32:17,749 WARN [BP-1840095573-172.17.0.3-1731414731611 heartbeating to localhost/127.0.0.1:37041 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T12:32:17,749 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-12T12:32:17,749 WARN [BP-1840095573-172.17.0.3-1731414731611 heartbeating to localhost/127.0.0.1:37041 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1840095573-172.17.0.3-1731414731611 (Datanode Uuid 5865a504-e101-426b-97a0-abe47a44bcb7) service to localhost/127.0.0.1:37041 2024-11-12T12:32:17,749 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T12:32:17,750 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/cluster_6ab58451-9178-2f10-797f-1a1f3d6a99ca/data/data5/current/BP-1840095573-172.17.0.3-1731414731611 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T12:32:17,751 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/cluster_6ab58451-9178-2f10-797f-1a1f3d6a99ca/data/data6/current/BP-1840095573-172.17.0.3-1731414731611 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T12:32:17,751 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T12:32:17,754 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4b340784{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T12:32:17,754 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1872922a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T12:32:17,754 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T12:32:17,755 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c597470{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T12:32:17,755 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@bb1336{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/hadoop.log.dir/,STOPPED} 2024-11-12T12:32:17,756 WARN [BP-1840095573-172.17.0.3-1731414731611 heartbeating to localhost/127.0.0.1:37041 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T12:32:17,756 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-12T12:32:17,756 WARN [BP-1840095573-172.17.0.3-1731414731611 heartbeating to localhost/127.0.0.1:37041 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1840095573-172.17.0.3-1731414731611 (Datanode Uuid db7993f6-04c7-4193-b13a-6c4efd097d55) service to localhost/127.0.0.1:37041
2024-11-12T12:32:17,756 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-12T12:32:17,757 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/cluster_6ab58451-9178-2f10-797f-1a1f3d6a99ca/data/data3/current/BP-1840095573-172.17.0.3-1731414731611 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-12T12:32:17,757 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/cluster_6ab58451-9178-2f10-797f-1a1f3d6a99ca/data/data4/current/BP-1840095573-172.17.0.3-1731414731611 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-12T12:32:17,757 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-12T12:32:17,759 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@582dea15{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-12T12:32:17,759 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5f6d3ff7{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-12T12:32:17,759 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-12T12:32:17,760 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@137179d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-12T12:32:17,760 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61a92fea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/hadoop.log.dir/,STOPPED}
2024-11-12T12:32:17,761 WARN [BP-1840095573-172.17.0.3-1731414731611 heartbeating to localhost/127.0.0.1:37041 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-12T12:32:17,761 WARN [BP-1840095573-172.17.0.3-1731414731611 heartbeating to localhost/127.0.0.1:37041 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1840095573-172.17.0.3-1731414731611 (Datanode Uuid ff78627a-2999-4b7d-97b2-7ae4df96539f) service to localhost/127.0.0.1:37041
2024-11-12T12:32:17,761 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/cluster_6ab58451-9178-2f10-797f-1a1f3d6a99ca/data/data1/current/BP-1840095573-172.17.0.3-1731414731611 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-12T12:32:17,762 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/cluster_6ab58451-9178-2f10-797f-1a1f3d6a99ca/data/data2/current/BP-1840095573-172.17.0.3-1731414731611 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-12T12:32:17,762 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-12T12:32:17,762 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-12T12:32:17,762 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-12T12:32:17,767 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7aaeb6cf{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-12T12:32:17,768 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@686c9dd5{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-12T12:32:17,768 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-12T12:32:17,768 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56aa9d3b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-12T12:32:17,768 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a49b909{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e03b5ff6-45e9-aa86-52e2-be5bf77fe667/hadoop.log.dir/,STOPPED}
2024-11-12T12:32:17,774 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-12T12:32:17,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-12T12:32:17,805 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=146 (was 86) - Thread LEAK? -, OpenFileDescriptor=518 (was 439) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=247 (was 233) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7333 (was 7508)