2024-11-09 05:54:56,310 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-11-09 05:54:56,321 main DEBUG Took 0.009944 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-09 05:54:56,322 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-09 05:54:56,322 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-09 05:54:56,323 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-09 05:54:56,324 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 05:54:56,340 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-09 05:54:56,352 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 05:54:56,354 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 05:54:56,354 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 05:54:56,355 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 05:54:56,355 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 05:54:56,355 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 05:54:56,356 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 05:54:56,357 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 05:54:56,357 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 05:54:56,358 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 05:54:56,358 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 05:54:56,359 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 05:54:56,359 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 05:54:56,360 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-09 05:54:56,360 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 05:54:56,361 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 05:54:56,361 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 05:54:56,361 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 05:54:56,362 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 05:54:56,362 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 05:54:56,363 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 05:54:56,363 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 05:54:56,363 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 05:54:56,364 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 05:54:56,364 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 05:54:56,364 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-09 05:54:56,366 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 05:54:56,367 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-09 05:54:56,369 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-09 05:54:56,369 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-09 05:54:56,371 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-09 05:54:56,371 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-09 05:54:56,379 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-09 05:54:56,382 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-09 05:54:56,383 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-09 05:54:56,384 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-09 05:54:56,384 main DEBUG createAppenders(={Console}) 2024-11-09 05:54:56,385 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-11-09 05:54:56,385 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-11-09 05:54:56,386 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-11-09 05:54:56,386 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-09 05:54:56,386 main DEBUG OutputStream closed 2024-11-09 05:54:56,387 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-09 05:54:56,387 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-09 05:54:56,387 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-11-09 05:54:56,456 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-09 05:54:56,458 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-09 05:54:56,460 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-09 05:54:56,461 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-09 05:54:56,461 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-09 05:54:56,462 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-09 05:54:56,462 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-09 05:54:56,462 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-09 05:54:56,463 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-09 05:54:56,463 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-09 05:54:56,463 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-09 05:54:56,464 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-09 05:54:56,464 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-09 05:54:56,464 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-09 05:54:56,465 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-09 05:54:56,465 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-09 05:54:56,465 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-09 05:54:56,466 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-09 05:54:56,468 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-09 05:54:56,469 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-11-09 05:54:56,469 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-09 05:54:56,470 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-11-09T05:54:56,484 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-11-09 05:54:56,487 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-09 05:54:56,487 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-09T05:54:56,713 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41 2024-11-09T05:54:56,738 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/cluster_3ac300fe-70ff-58de-25dc-ce33649a639e, deleteOnExit=true 2024-11-09T05:54:56,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/test.cache.data in system properties and HBase conf 2024-11-09T05:54:56,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/hadoop.tmp.dir in system properties and HBase conf 2024-11-09T05:54:56,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/hadoop.log.dir in system properties and HBase conf 2024-11-09T05:54:56,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-09T05:54:56,742 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-09T05:54:56,742 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-09T05:54:56,836 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-09T05:54:56,927 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-09T05:54:56,932 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-09T05:54:56,933 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-09T05:54:56,934 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-09T05:54:56,934 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-09T05:54:56,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-09T05:54:56,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-09T05:54:56,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-09T05:54:56,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-09T05:54:56,938 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-09T05:54:56,938 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/nfs.dump.dir in system properties and HBase conf 2024-11-09T05:54:56,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/java.io.tmpdir in system properties and HBase conf 2024-11-09T05:54:56,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-09T05:54:56,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-09T05:54:56,941 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-09T05:54:57,996 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-09T05:54:58,072 INFO [Time-limited test {}] log.Log(170): Logging initialized @2402ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-09T05:54:58,137 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T05:54:58,206 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T05:54:58,230 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T05:54:58,230 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T05:54:58,232 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T05:54:58,248 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T05:54:58,251 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/hadoop.log.dir/,AVAILABLE} 2024-11-09T05:54:58,252 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T05:54:58,448 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e4c45c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/java.io.tmpdir/jetty-localhost-45945-hadoop-hdfs-3_4_1-tests_jar-_-any-2464383040715432031/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-09T05:54:58,454 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:45945} 2024-11-09T05:54:58,455 INFO [Time-limited test {}] server.Server(415): Started @2786ms 2024-11-09T05:54:58,984 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T05:54:58,992 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T05:54:58,992 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T05:54:58,993 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T05:54:58,993 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T05:54:58,994 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/hadoop.log.dir/,AVAILABLE} 2024-11-09T05:54:58,994 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T05:54:59,091 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4839957b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/java.io.tmpdir/jetty-localhost-34639-hadoop-hdfs-3_4_1-tests_jar-_-any-10991994125920911473/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T05:54:59,092 
INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:34639} 2024-11-09T05:54:59,092 INFO [Time-limited test {}] server.Server(415): Started @3423ms 2024-11-09T05:54:59,143 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-09T05:54:59,246 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T05:54:59,251 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T05:54:59,253 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T05:54:59,253 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T05:54:59,253 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T05:54:59,254 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/hadoop.log.dir/,AVAILABLE} 2024-11-09T05:54:59,255 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T05:54:59,380 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c6b8f01{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/java.io.tmpdir/jetty-localhost-36329-hadoop-hdfs-3_4_1-tests_jar-_-any-14469615948761377908/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T05:54:59,381 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:36329} 2024-11-09T05:54:59,381 INFO [Time-limited test {}] server.Server(415): Started @3712ms 2024-11-09T05:54:59,383 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-09T05:54:59,415 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T05:54:59,421 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T05:54:59,423 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T05:54:59,423 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T05:54:59,424 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T05:54:59,424 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/hadoop.log.dir/,AVAILABLE} 2024-11-09T05:54:59,425 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T05:54:59,529 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e59159d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/java.io.tmpdir/jetty-localhost-42827-hadoop-hdfs-3_4_1-tests_jar-_-any-428605571016390179/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T05:54:59,529 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:42827} 2024-11-09T05:54:59,530 INFO [Time-limited test {}] server.Server(415): Started @3860ms 2024-11-09T05:54:59,531 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-09T05:55:01,078 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/cluster_3ac300fe-70ff-58de-25dc-ce33649a639e/data/data2/current/BP-1764202313-172.17.0.2-1731131697473/current, will proceed with Du for space computation calculation, 2024-11-09T05:55:01,078 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/cluster_3ac300fe-70ff-58de-25dc-ce33649a639e/data/data3/current/BP-1764202313-172.17.0.2-1731131697473/current, will proceed with Du for space computation calculation, 2024-11-09T05:55:01,078 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/cluster_3ac300fe-70ff-58de-25dc-ce33649a639e/data/data1/current/BP-1764202313-172.17.0.2-1731131697473/current, will proceed with Du for space computation calculation, 2024-11-09T05:55:01,078 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/cluster_3ac300fe-70ff-58de-25dc-ce33649a639e/data/data4/current/BP-1764202313-172.17.0.2-1731131697473/current, will proceed with Du for space computation calculation, 2024-11-09T05:55:01,108 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-09T05:55:01,108 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-09T05:55:01,152 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf45a556e71935e95 with lease ID 0xec7015388c774a17: Processing first storage report for DS-58dfa771-9ed5-473f-834d-9848c0b95656 from datanode DatanodeRegistration(127.0.0.1:36273, datanodeUuid=89648bbf-42d9-4464-a6c3-5eedc9c8b244, infoPort=33935, infoSecurePort=0, ipcPort=45977, storageInfo=lv=-57;cid=testClusterID;nsid=1968976312;c=1731131697474) 2024-11-09T05:55:01,153 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf45a556e71935e95 with lease ID 0xec7015388c774a17: from storage DS-58dfa771-9ed5-473f-834d-9848c0b95656 node DatanodeRegistration(127.0.0.1:36273, datanodeUuid=89648bbf-42d9-4464-a6c3-5eedc9c8b244, infoPort=33935, infoSecurePort=0, ipcPort=45977, storageInfo=lv=-57;cid=testClusterID;nsid=1968976312;c=1731131697474), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-09T05:55:01,153 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3bb9284b26741269 with lease ID 0xec7015388c774a18: Processing first storage report for DS-6c263bf4-2a00-45bc-b04d-c757a9999f12 from datanode DatanodeRegistration(127.0.0.1:34253, datanodeUuid=3e44ef00-3164-4efa-9d2f-bde330321388, infoPort=33107, infoSecurePort=0, ipcPort=39049, storageInfo=lv=-57;cid=testClusterID;nsid=1968976312;c=1731131697474) 2024-11-09T05:55:01,154 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3bb9284b26741269 with lease ID 0xec7015388c774a18: from storage DS-6c263bf4-2a00-45bc-b04d-c757a9999f12 node DatanodeRegistration(127.0.0.1:34253, datanodeUuid=3e44ef00-3164-4efa-9d2f-bde330321388, infoPort=33107, infoSecurePort=0, ipcPort=39049, storageInfo=lv=-57;cid=testClusterID;nsid=1968976312;c=1731131697474), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T05:55:01,154 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf45a556e71935e95 with lease ID 0xec7015388c774a17: Processing first storage report for DS-9039e432-3f13-4bc9-a032-ab2569aa95f4 from datanode DatanodeRegistration(127.0.0.1:36273, datanodeUuid=89648bbf-42d9-4464-a6c3-5eedc9c8b244, infoPort=33935, infoSecurePort=0, ipcPort=45977, storageInfo=lv=-57;cid=testClusterID;nsid=1968976312;c=1731131697474) 2024-11-09T05:55:01,154 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf45a556e71935e95 with lease ID 0xec7015388c774a17: from storage DS-9039e432-3f13-4bc9-a032-ab2569aa95f4 node DatanodeRegistration(127.0.0.1:36273, datanodeUuid=89648bbf-42d9-4464-a6c3-5eedc9c8b244, infoPort=33935, infoSecurePort=0, ipcPort=45977, storageInfo=lv=-57;cid=testClusterID;nsid=1968976312;c=1731131697474), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T05:55:01,154 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3bb9284b26741269 with lease ID 0xec7015388c774a18: Processing first storage report for DS-8bb0f269-5371-4a0c-89bc-6d905f5f595a from datanode DatanodeRegistration(127.0.0.1:34253, datanodeUuid=3e44ef00-3164-4efa-9d2f-bde330321388, infoPort=33107, infoSecurePort=0, ipcPort=39049, storageInfo=lv=-57;cid=testClusterID;nsid=1968976312;c=1731131697474) 2024-11-09T05:55:01,155 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x3bb9284b26741269 with lease ID 0xec7015388c774a18: from storage DS-8bb0f269-5371-4a0c-89bc-6d905f5f595a node DatanodeRegistration(127.0.0.1:34253, datanodeUuid=3e44ef00-3164-4efa-9d2f-bde330321388, infoPort=33107, infoSecurePort=0, ipcPort=39049, storageInfo=lv=-57;cid=testClusterID;nsid=1968976312;c=1731131697474), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T05:55:01,176 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/cluster_3ac300fe-70ff-58de-25dc-ce33649a639e/data/data6/current/BP-1764202313-172.17.0.2-1731131697473/current, will proceed with Du for space computation calculation, 2024-11-09T05:55:01,176 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/cluster_3ac300fe-70ff-58de-25dc-ce33649a639e/data/data5/current/BP-1764202313-172.17.0.2-1731131697473/current, will proceed with Du for space computation calculation, 2024-11-09T05:55:01,192 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-09T05:55:01,196 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x28a0be5f42f9edb2 with lease ID 0xec7015388c774a19: Processing first storage report for DS-6365bf3d-cdbe-4670-ada1-315216b07bfb from datanode DatanodeRegistration(127.0.0.1:35337, datanodeUuid=bd4e8063-aec1-41c5-96fe-fb36b53d9cea, infoPort=44343, infoSecurePort=0, ipcPort=33187, storageInfo=lv=-57;cid=testClusterID;nsid=1968976312;c=1731131697474) 2024-11-09T05:55:01,196 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x28a0be5f42f9edb2 with lease ID 0xec7015388c774a19: from storage DS-6365bf3d-cdbe-4670-ada1-315216b07bfb node DatanodeRegistration(127.0.0.1:35337, datanodeUuid=bd4e8063-aec1-41c5-96fe-fb36b53d9cea, infoPort=44343, infoSecurePort=0, ipcPort=33187, storageInfo=lv=-57;cid=testClusterID;nsid=1968976312;c=1731131697474), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T05:55:01,197 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x28a0be5f42f9edb2 with lease ID 0xec7015388c774a19: Processing first storage report for DS-6358e757-4424-44ec-a8e9-d8c9de90a614 from datanode DatanodeRegistration(127.0.0.1:35337, datanodeUuid=bd4e8063-aec1-41c5-96fe-fb36b53d9cea, infoPort=44343, infoSecurePort=0, ipcPort=33187, storageInfo=lv=-57;cid=testClusterID;nsid=1968976312;c=1731131697474) 2024-11-09T05:55:01,197 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x28a0be5f42f9edb2 with lease ID 0xec7015388c774a19: from storage DS-6358e757-4424-44ec-a8e9-d8c9de90a614 node DatanodeRegistration(127.0.0.1:35337, datanodeUuid=bd4e8063-aec1-41c5-96fe-fb36b53d9cea, infoPort=44343, infoSecurePort=0, ipcPort=33187, storageInfo=lv=-57;cid=testClusterID;nsid=1968976312;c=1731131697474), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T05:55:01,243 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41 2024-11-09T05:55:01,308 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... using builtin-java codec where applicable 2024-11-09T05:55:01,366 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=161, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=36, ProcessCount=11, AvailableMemoryMB=1975 2024-11-09T05:55:01,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-09T05:55:01,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-11-09T05:55:01,464 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/cluster_3ac300fe-70ff-58de-25dc-ce33649a639e/zookeeper_0, clientPort=57410, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/cluster_3ac300fe-70ff-58de-25dc-ce33649a639e/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/cluster_3ac300fe-70ff-58de-25dc-ce33649a639e/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-09T05:55:01,474 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57410 2024-11-09T05:55:01,486 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T05:55:01,490 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T05:55:01,568 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T05:55:01,569 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-09T05:55:01,609 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1192160228_22 at /127.0.0.1:52314 [Receiving block BP-1764202313-172.17.0.2-1731131697473:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:36273:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52314 dst: /127.0.0.1:36273
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-09T05:55:01,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36273 is added to blk_-9223372036854775792_1002 (size=7) 2024-11-09T05:55:02,030 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-09T05:55:02,041 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e with version=8 2024-11-09T05:55:02,041 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/hbase-staging 2024-11-09T05:55:02,125 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-09T05:55:02,387 INFO [Time-limited test {}] client.ConnectionUtils(128): master/059551c538b7:0 server-side Connection retries=45 2024-11-09T05:55:02,395 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T05:55:02,396 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T05:55:02,400 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T05:55:02,400 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T05:55:02,401 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T05:55:02,531 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-09T05:55:02,587 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-09T05:55:02,596 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-09T05:55:02,599 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T05:55:02,625 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 92150 (auto-detected) 2024-11-09T05:55:02,626 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-09T05:55:02,645 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39805 2024-11-09T05:55:02,667 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39805 connecting to ZooKeeper ensemble=127.0.0.1:57410 2024-11-09T05:55:02,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:398050x0, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T05:55:02,781 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39805-0x1011e2974730000 connected 2024-11-09T05:55:02,874 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T05:55:02,878 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T05:55:02,892 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T05:55:02,895 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e, hbase.cluster.distributed=false 2024-11-09T05:55:02,917 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T05:55:02,921 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39805 2024-11-09T05:55:02,921 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39805 2024-11-09T05:55:02,922 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39805 2024-11-09T05:55:02,923 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39805 2024-11-09T05:55:02,923 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39805 2024-11-09T05:55:03,019 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/059551c538b7:0 server-side Connection retries=45 2024-11-09T05:55:03,020 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T05:55:03,021 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T05:55:03,021 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T05:55:03,021 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T05:55:03,021 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T05:55:03,024 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T05:55:03,026 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T05:55:03,027 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37409 2024-11-09T05:55:03,029 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37409 connecting to ZooKeeper ensemble=127.0.0.1:57410 2024-11-09T05:55:03,030 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T05:55:03,034 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T05:55:03,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:374090x0, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T05:55:03,053 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:374090x0, quorum=127.0.0.1:57410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T05:55:03,053 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37409-0x1011e2974730001 connected 2024-11-09T05:55:03,056 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T05:55:03,063 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T05:55:03,065 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37409-0x1011e2974730001, quorum=127.0.0.1:57410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-09T05:55:03,071 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37409-0x1011e2974730001, quorum=127.0.0.1:57410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T05:55:03,071 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37409 2024-11-09T05:55:03,072 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37409 2024-11-09T05:55:03,072 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37409 2024-11-09T05:55:03,072 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37409 2024-11-09T05:55:03,073 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37409 2024-11-09T05:55:03,087 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/059551c538b7:0 server-side Connection retries=45 2024-11-09T05:55:03,088 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T05:55:03,088 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T05:55:03,088 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T05:55:03,088 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T05:55:03,089 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T05:55:03,089 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T05:55:03,089 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T05:55:03,090 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37661 2024-11-09T05:55:03,092 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37661 connecting to ZooKeeper ensemble=127.0.0.1:57410 2024-11-09T05:55:03,093 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T05:55:03,096 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T05:55:03,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:376610x0, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T05:55:03,112 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:376610x0, quorum=127.0.0.1:57410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T05:55:03,112 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37661-0x1011e2974730002 connected 2024-11-09T05:55:03,112 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T05:55:03,113 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T05:55:03,114 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37661-0x1011e2974730002, quorum=127.0.0.1:57410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-09T05:55:03,117 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37661-0x1011e2974730002, quorum=127.0.0.1:57410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T05:55:03,118 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37661 2024-11-09T05:55:03,120 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37661 2024-11-09T05:55:03,121 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37661 2024-11-09T05:55:03,121 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37661 2024-11-09T05:55:03,124 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with 
threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37661 2024-11-09T05:55:03,140 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/059551c538b7:0 server-side Connection retries=45 2024-11-09T05:55:03,140 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T05:55:03,141 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T05:55:03,141 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T05:55:03,141 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T05:55:03,141 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T05:55:03,141 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T05:55:03,141 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T05:55:03,142 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42775 2024-11-09T05:55:03,144 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42775 connecting to ZooKeeper ensemble=127.0.0.1:57410 2024-11-09T05:55:03,145 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T05:55:03,147 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T05:55:03,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:427750x0, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T05:55:03,164 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:427750x0, quorum=127.0.0.1:57410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T05:55:03,164 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42775-0x1011e2974730003 connected 2024-11-09T05:55:03,165 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T05:55:03,165 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T05:55:03,166 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42775-0x1011e2974730003, quorum=127.0.0.1:57410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-09T05:55:03,168 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): regionserver:42775-0x1011e2974730003, quorum=127.0.0.1:57410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T05:55:03,169 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42775 2024-11-09T05:55:03,171 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42775 2024-11-09T05:55:03,172 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42775 2024-11-09T05:55:03,173 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42775 2024-11-09T05:55:03,173 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42775 2024-11-09T05:55:03,188 DEBUG [M:0;059551c538b7:39805 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;059551c538b7:39805 2024-11-09T05:55:03,189 INFO [master/059551c538b7:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/059551c538b7,39805,1731131702238 2024-11-09T05:55:03,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T05:55:03,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37409-0x1011e2974730001, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T05:55:03,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37661-0x1011e2974730002, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T05:55:03,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x1011e2974730003, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T05:55:03,208 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/059551c538b7,39805,1731131702238 2024-11-09T05:55:03,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37409-0x1011e2974730001, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T05:55:03,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37661-0x1011e2974730002, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T05:55:03,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x1011e2974730003, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T05:55:03,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39805-0x1011e2974730000, 
quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:03,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37661-0x1011e2974730002, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:03,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x1011e2974730003, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:03,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37409-0x1011e2974730001, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:03,238 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-09T05:55:03,240 INFO [master/059551c538b7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/059551c538b7,39805,1731131702238 from backup master directory 2024-11-09T05:55:03,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/059551c538b7,39805,1731131702238 2024-11-09T05:55:03,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x1011e2974730003, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T05:55:03,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37661-0x1011e2974730002, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T05:55:03,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37409-0x1011e2974730001, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T05:55:03,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T05:55:03,252 WARN [master/059551c538b7:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-09T05:55:03,253 INFO [master/059551c538b7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=059551c538b7,39805,1731131702238 2024-11-09T05:55:03,255 INFO [master/059551c538b7:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-09T05:55:03,256 INFO [master/059551c538b7:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-09T05:55:03,318 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/hbase.id] with ID: 6b277c1b-3513-442e-8c52-dfd123cca6a9 2024-11-09T05:55:03,319 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/.tmp/hbase.id 2024-11-09T05:55:03,326 WARN [master/059551c538b7:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T05:55:03,326 WARN [master/059551c538b7:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T05:55:03,329 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1192160228_22 at /127.0.0.1:56830 [Receiving block BP-1764202313-172.17.0.2-1731131697473:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:36273:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56830 dst: /127.0.0.1:36273 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-09T05:55:03,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36273 is added to blk_-9223372036854775776_1004 (size=42) 2024-11-09T05:55:03,337 WARN [master/059551c538b7:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T05:55:03,337 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/.tmp/hbase.id]:[hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/hbase.id] 2024-11-09T05:55:03,385 INFO [master/059551c538b7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T05:55:03,390 INFO [master/059551c538b7:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-09T05:55:03,407 INFO [master/059551c538b7:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 16ms. 2024-11-09T05:55:03,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x1011e2974730003, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:03,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37409-0x1011e2974730001, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:03,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37661-0x1011e2974730002, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:03,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:03,434 WARN [master/059551c538b7:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T05:55:03,434 WARN [master/059551c538b7:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T05:55:03,439 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1192160228_22 at /127.0.0.1:60644 [Receiving block BP-1764202313-172.17.0.2-1731131697473:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:34253:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60644 dst: /127.0.0.1:34253 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T05:55:03,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34253 is added to blk_-9223372036854775760_1006 (size=196) 2024-11-09T05:55:03,446 WARN [master/059551c538b7:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T05:55:03,461 INFO [master/059551c538b7:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-09T05:55:03,462 INFO [master/059551c538b7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-09T05:55:03,467 INFO [master/059551c538b7:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-09T05:55:03,494 WARN [master/059551c538b7:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, 
policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T05:55:03,494 WARN [master/059551c538b7:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T05:55:03,497 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1192160228_22 at /127.0.0.1:51836 [Receiving block BP-1764202313-172.17.0.2-1731131697473:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:35337:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51836 dst: /127.0.0.1:35337 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T05:55:03,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35337 is added to blk_-9223372036854775744_1008 (size=1189) 2024-11-09T05:55:03,503 WARN [master/059551c538b7:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-09T05:55:03,518 INFO [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/MasterData/data/master/store 2024-11-09T05:55:03,534 WARN [master/059551c538b7:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T05:55:03,534 WARN [master/059551c538b7:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T05:55:03,537 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1192160228_22 at /127.0.0.1:51860 [Receiving block BP-1764202313-172.17.0.2-1731131697473:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35337:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51860 dst: /127.0.0.1:35337 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T05:55:03,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35337 is added to blk_-9223372036854775728_1010 (size=34) 2024-11-09T05:55:03,543 WARN [master/059551c538b7:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T05:55:03,546 INFO [master/059551c538b7:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-09T05:55:03,549 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T05:55:03,550 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-09T05:55:03,550 INFO [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T05:55:03,550 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T05:55:03,551 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-09T05:55:03,551 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T05:55:03,552 INFO [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-09T05:55:03,553 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731131703550Disabling compacts and flushes for region at 1731131703550Disabling writes for close at 1731131703551 (+1 ms)Writing region close event to WAL at 1731131703552 (+1 ms)Closed at 1731131703552 2024-11-09T05:55:03,555 WARN [master/059551c538b7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/MasterData/data/master/store/.initializing 2024-11-09T05:55:03,555 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/MasterData/WALs/059551c538b7,39805,1731131702238 2024-11-09T05:55:03,564 INFO [master/059551c538b7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-09T05:55:03,579 INFO [master/059551c538b7:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=059551c538b7%2C39805%2C1731131702238, suffix=, logDir=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/MasterData/WALs/059551c538b7,39805,1731131702238, archiveDir=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/MasterData/oldWALs, maxLogs=10 2024-11-09T05:55:03,606 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/MasterData/WALs/059551c538b7,39805,1731131702238/059551c538b7%2C39805%2C1731131702238.1731131703584, exclude list is [], retry=0 2024-11-09T05:55:03,625 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:414) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:473) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:468) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T05:55:03,626 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34253,DS-6c263bf4-2a00-45bc-b04d-c757a9999f12,DISK] 2024-11-09T05:55:03,626 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35337,DS-6365bf3d-cdbe-4670-ada1-315216b07bfb,DISK] 2024-11-09T05:55:03,626 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36273,DS-58dfa771-9ed5-473f-834d-9848c0b95656,DISK] 2024-11-09T05:55:03,629 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-09T05:55:03,669 INFO [master/059551c538b7:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/MasterData/WALs/059551c538b7,39805,1731131702238/059551c538b7%2C39805%2C1731131702238.1731131703584 2024-11-09T05:55:03,670 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44343:44343),(127.0.0.1/127.0.0.1:33935:33935),(127.0.0.1/127.0.0.1:33107:33107)] 2024-11-09T05:55:03,670 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-09T05:55:03,671 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T05:55:03,673 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T05:55:03,674 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T05:55:03,711 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T05:55:03,737 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-09T05:55:03,740 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:03,743 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T05:55:03,743 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T05:55:03,747 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-09T05:55:03,747 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:03,748 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T05:55:03,749 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T05:55:03,752 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-09T05:55:03,752 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:03,753 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T05:55:03,753 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T05:55:03,756 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-09T05:55:03,756 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:03,757 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T05:55:03,757 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T05:55:03,760 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-09T05:55:03,761 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-09T05:55:03,766 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T05:55:03,766 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T05:55:03,769 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-09T05:55:03,773 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T05:55:03,778 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T05:55:03,780 INFO [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70640757, jitterRate=0.05262930691242218}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T05:55:03,788 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731131703685Initializing all the Stores at 1731131703687 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731131703688 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731131703688Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731131703689 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731131703689Cleaning up temporary data from old regions at 1731131703766 (+77 ms)Region opened successfully at 1731131703788 (+22 ms) 2024-11-09T05:55:03,789 INFO [master/059551c538b7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-09T05:55:03,820 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1eb9e778, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=059551c538b7/172.17.0.2:0 2024-11-09T05:55:03,847 INFO [master/059551c538b7:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-09T05:55:03,856 INFO [master/059551c538b7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-09T05:55:03,856 INFO [master/059551c538b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-09T05:55:03,858 INFO [master/059551c538b7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-09T05:55:03,860 INFO [master/059551c538b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-09T05:55:03,864 INFO [master/059551c538b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-09T05:55:03,864 INFO [master/059551c538b7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-09T05:55:03,886 INFO [master/059551c538b7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-09T05:55:03,893 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-09T05:55:03,942 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-09T05:55:03,946 INFO [master/059551c538b7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-09T05:55:03,949 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-09T05:55:03,956 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-09T05:55:03,959 INFO [master/059551c538b7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-09T05:55:03,963 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-09T05:55:03,973 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-09T05:55:03,976 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-09T05:55:03,988 DEBUG [master/059551c538b7:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-09T05:55:04,010 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-09T05:55:04,020 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-09T05:55:04,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37409-0x1011e2974730001, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T05:55:04,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x1011e2974730003, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T05:55:04,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T05:55:04,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37661-0x1011e2974730002, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T05:55:04,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x1011e2974730003, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:04,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37661-0x1011e2974730002, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:04,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37409-0x1011e2974730001, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:04,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:04,036 INFO [master/059551c538b7:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=059551c538b7,39805,1731131702238, sessionid=0x1011e2974730000, setting cluster-up flag (Was=false) 2024-11-09T05:55:04,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37409-0x1011e2974730001, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:04,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37661-0x1011e2974730002, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:04,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x1011e2974730003, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-09T05:55:04,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:04,104 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-09T05:55:04,109 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=059551c538b7,39805,1731131702238 2024-11-09T05:55:04,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37409-0x1011e2974730001, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:04,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:04,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x1011e2974730003, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:04,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37661-0x1011e2974730002, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:04,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35337 is added to blk_-9223372036854775788_1002 (size=7) 2024-11-09T05:55:04,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34253 is added to blk_-9223372036854775789_1002 (size=7) 2024-11-09T05:55:04,167 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-09T05:55:04,169 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=059551c538b7,39805,1731131702238 2024-11-09T05:55:04,175 INFO [master/059551c538b7:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-09T05:55:04,246 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-09T05:55:04,256 INFO [master/059551c538b7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-09T05:55:04,261 INFO [master/059551c538b7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, 
RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-09T05:55:04,266 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 059551c538b7,39805,1731131702238 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-09T05:55:04,273 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/059551c538b7:0, corePoolSize=5, maxPoolSize=5 2024-11-09T05:55:04,273 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/059551c538b7:0, corePoolSize=5, maxPoolSize=5 2024-11-09T05:55:04,274 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/059551c538b7:0, corePoolSize=5, maxPoolSize=5 2024-11-09T05:55:04,274 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/059551c538b7:0, corePoolSize=5, maxPoolSize=5 2024-11-09T05:55:04,274 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/059551c538b7:0, corePoolSize=10, maxPoolSize=10 2024-11-09T05:55:04,274 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,274 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/059551c538b7:0, corePoolSize=2, maxPoolSize=2 2024-11-09T05:55:04,274 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,276 INFO [master/059551c538b7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731131734276 2024-11-09T05:55:04,277 INFO [RS:0;059551c538b7:37409 {}] regionserver.HRegionServer(746): ClusterId : 6b277c1b-3513-442e-8c52-dfd123cca6a9 2024-11-09T05:55:04,277 INFO [RS:2;059551c538b7:42775 {}] regionserver.HRegionServer(746): ClusterId : 6b277c1b-3513-442e-8c52-dfd123cca6a9 2024-11-09T05:55:04,278 INFO [RS:1;059551c538b7:37661 {}] regionserver.HRegionServer(746): ClusterId : 6b277c1b-3513-442e-8c52-dfd123cca6a9 2024-11-09T05:55:04,279 INFO [master/059551c538b7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-09T05:55:04,280 INFO [master/059551c538b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-09T05:55:04,280 DEBUG [RS:2;059551c538b7:42775 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T05:55:04,280 DEBUG 
[RS:0;059551c538b7:37409 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T05:55:04,280 DEBUG [RS:1;059551c538b7:37661 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T05:55:04,281 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T05:55:04,281 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-09T05:55:04,284 INFO [master/059551c538b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-09T05:55:04,284 INFO [master/059551c538b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-09T05:55:04,285 INFO [master/059551c538b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-09T05:55:04,285 INFO [master/059551c538b7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-09T05:55:04,285 INFO [master/059551c538b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,289 INFO [master/059551c538b7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-09T05:55:04,289 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:04,289 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-09T05:55:04,290 INFO [master/059551c538b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-09T05:55:04,290 INFO [master/059551c538b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-09T05:55:04,294 INFO [master/059551c538b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-09T05:55:04,294 INFO [master/059551c538b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-09T05:55:04,296 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/059551c538b7:0:becomeActiveMaster-HFileCleaner.large.0-1731131704295,5,FailOnTimeoutGroup] 2024-11-09T05:55:04,296 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T05:55:04,296 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T05:55:04,300 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/059551c538b7:0:becomeActiveMaster-HFileCleaner.small.0-1731131704296,5,FailOnTimeoutGroup] 2024-11-09T05:55:04,300 INFO [master/059551c538b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,300 INFO [master/059551c538b7:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-09T05:55:04,301 INFO [master/059551c538b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,302 INFO [master/059551c538b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,305 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1192160228_22 at /127.0.0.1:60668 [Receiving block BP-1764202313-172.17.0.2-1731131697473:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:34253:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60668 dst: /127.0.0.1:34253 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T05:55:04,306 DEBUG [RS:0;059551c538b7:37409 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T05:55:04,306 DEBUG [RS:1;059551c538b7:37661 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T05:55:04,306 DEBUG [RS:1;059551c538b7:37661 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T05:55:04,306 DEBUG [RS:0;059551c538b7:37409 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T05:55:04,307 DEBUG [RS:2;059551c538b7:42775 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T05:55:04,307 DEBUG [RS:2;059551c538b7:42775 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T05:55:04,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34253 is added to blk_-9223372036854775712_1013 (size=1321) 2024-11-09T05:55:04,317 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-09T05:55:04,319 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-09T05:55:04,319 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e 2024-11-09T05:55:04,326 DEBUG [RS:0;059551c538b7:37409 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T05:55:04,326 DEBUG [RS:1;059551c538b7:37661 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T05:55:04,327 DEBUG [RS:0;059551c538b7:37409 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69e81f91, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=059551c538b7/172.17.0.2:0 2024-11-09T05:55:04,327 DEBUG [RS:1;059551c538b7:37661 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32c42198, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=059551c538b7/172.17.0.2:0 2024-11-09T05:55:04,328 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T05:55:04,328 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. 
There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T05:55:04,328 DEBUG [RS:2;059551c538b7:42775 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T05:55:04,329 DEBUG [RS:2;059551c538b7:42775 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e747eeb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=059551c538b7/172.17.0.2:0 2024-11-09T05:55:04,338 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1192160228_22 at /127.0.0.1:51908 [Receiving block BP-1764202313-172.17.0.2-1731131697473:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:35337:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51908 dst: /127.0.0.1:35337 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T05:55:04,349 DEBUG [RS:1;059551c538b7:37661 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;059551c538b7:37661 2024-11-09T05:55:04,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35337 is added to blk_-9223372036854775696_1015 (size=32) 2024-11-09T05:55:04,351 DEBUG [RS:2;059551c538b7:42775 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;059551c538b7:42775 2024-11-09T05:55:04,352 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-09T05:55:04,352 DEBUG [RS:0;059551c538b7:37409 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;059551c538b7:37409 2024-11-09T05:55:04,353 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T05:55:04,354 INFO [RS:2;059551c538b7:42775 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T05:55:04,354 INFO [RS:1;059551c538b7:37661 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T05:55:04,354 INFO [RS:0;059551c538b7:37409 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T05:55:04,354 INFO [RS:2;059551c538b7:42775 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T05:55:04,354 INFO [RS:1;059551c538b7:37661 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T05:55:04,354 INFO [RS:0;059551c538b7:37409 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T05:55:04,354 DEBUG [RS:0;059551c538b7:37409 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-09T05:55:04,354 DEBUG [RS:2;059551c538b7:42775 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-09T05:55:04,354 DEBUG [RS:1;059551c538b7:37661 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-09T05:55:04,357 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-09T05:55:04,357 INFO [RS:0;059551c538b7:37409 {}] regionserver.HRegionServer(2659): reportForDuty to master=059551c538b7,39805,1731131702238 with port=37409, startcode=1731131702988 2024-11-09T05:55:04,357 INFO [RS:1;059551c538b7:37661 {}] regionserver.HRegionServer(2659): reportForDuty to master=059551c538b7,39805,1731131702238 with port=37661, startcode=1731131703087 2024-11-09T05:55:04,357 INFO [RS:2;059551c538b7:42775 {}] regionserver.HRegionServer(2659): reportForDuty to master=059551c538b7,39805,1731131702238 with port=42775, startcode=1731131703140 2024-11-09T05:55:04,361 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-09T05:55:04,361 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:04,362 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T05:55:04,363 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-09T05:55:04,367 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-09T05:55:04,367 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:04,369 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T05:55:04,369 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-09T05:55:04,373 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-09T05:55:04,373 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:04,374 DEBUG [RS:0;059551c538b7:37409 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-09T05:55:04,374 DEBUG [RS:1;059551c538b7:37661 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-09T05:55:04,374 DEBUG [RS:2;059551c538b7:42775 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 
2024-11-09T05:55:04,374 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T05:55:04,375 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-09T05:55:04,378 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-09T05:55:04,378 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:04,380 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T05:55:04,380 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-09T05:55:04,382 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/hbase/meta/1588230740 2024-11-09T05:55:04,383 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/hbase/meta/1588230740 2024-11-09T05:55:04,387 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-09T05:55:04,387 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-09T05:55:04,388 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-09T05:55:04,391 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-09T05:55:04,402 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T05:55:04,404 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73601260, jitterRate=0.09674423933029175}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T05:55:04,409 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731131704354Initializing all the Stores at 1731131704356 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731131704356Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731131704357 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731131704357Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731131704357Cleaning up temporary data from old regions at 1731131704387 (+30 ms)Region opened successfully at 1731131704409 (+22 ms) 2024-11-09T05:55:04,410 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-09T05:55:04,410 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-09T05:55:04,410 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-09T05:55:04,410 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-09T05:55:04,410 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-09T05:55:04,411 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-09T05:55:04,411 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731131704410Disabling compacts and flushes for region at 1731131704410Disabling writes for close at 
1731131704410Writing region close event to WAL at 1731131704411 (+1 ms)Closed at 1731131704411 2024-11-09T05:55:04,415 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55293, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-09T05:55:04,415 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44971, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-09T05:55:04,415 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50599, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-09T05:55:04,418 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T05:55:04,418 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-09T05:55:04,423 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39805 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 059551c538b7,42775,1731131703140 2024-11-09T05:55:04,426 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39805 {}] master.ServerManager(517): Registering regionserver=059551c538b7,42775,1731131703140 2024-11-09T05:55:04,426 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-09T05:55:04,437 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-09T05:55:04,437 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39805 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 059551c538b7,37409,1731131702988 2024-11-09T05:55:04,438 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39805 {}] master.ServerManager(517): Registering regionserver=059551c538b7,37409,1731131702988 2024-11-09T05:55:04,442 DEBUG [RS:2;059551c538b7:42775 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e 2024-11-09T05:55:04,442 DEBUG [RS:2;059551c538b7:42775 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33319 2024-11-09T05:55:04,442 DEBUG [RS:2;059551c538b7:42775 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-09T05:55:04,443 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39805 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 059551c538b7,37661,1731131703087 2024-11-09T05:55:04,443 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-09T05:55:04,444 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39805 {}] master.ServerManager(517): Registering 
regionserver=059551c538b7,37661,1731131703087 2024-11-09T05:55:04,444 DEBUG [RS:0;059551c538b7:37409 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e 2024-11-09T05:55:04,444 DEBUG [RS:0;059551c538b7:37409 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33319 2024-11-09T05:55:04,444 DEBUG [RS:0;059551c538b7:37409 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-09T05:55:04,449 DEBUG [RS:1;059551c538b7:37661 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e 2024-11-09T05:55:04,449 DEBUG [RS:1;059551c538b7:37661 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33319 2024-11-09T05:55:04,449 DEBUG [RS:1;059551c538b7:37661 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-09T05:55:04,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T05:55:04,511 DEBUG [RS:2;059551c538b7:42775 {}] zookeeper.ZKUtil(111): regionserver:42775-0x1011e2974730003, quorum=127.0.0.1:57410, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/059551c538b7,42775,1731131703140 2024-11-09T05:55:04,511 DEBUG [RS:0;059551c538b7:37409 {}] zookeeper.ZKUtil(111): regionserver:37409-0x1011e2974730001, quorum=127.0.0.1:57410, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/059551c538b7,37409,1731131702988 2024-11-09T05:55:04,511 WARN [RS:2;059551c538b7:42775 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-09T05:55:04,511 WARN [RS:0;059551c538b7:37409 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-09T05:55:04,511 INFO [RS:0;059551c538b7:37409 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-09T05:55:04,511 INFO [RS:2;059551c538b7:42775 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-09T05:55:04,512 DEBUG [RS:1;059551c538b7:37661 {}] zookeeper.ZKUtil(111): regionserver:37661-0x1011e2974730002, quorum=127.0.0.1:57410, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/059551c538b7,37661,1731131703087 2024-11-09T05:55:04,512 DEBUG [RS:0;059551c538b7:37409 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/WALs/059551c538b7,37409,1731131702988 2024-11-09T05:55:04,512 DEBUG [RS:2;059551c538b7:42775 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/WALs/059551c538b7,42775,1731131703140 2024-11-09T05:55:04,512 WARN [RS:1;059551c538b7:37661 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-09T05:55:04,512 INFO [RS:1;059551c538b7:37661 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-09T05:55:04,512 DEBUG [RS:1;059551c538b7:37661 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/WALs/059551c538b7,37661,1731131703087 2024-11-09T05:55:04,514 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [059551c538b7,42775,1731131703140] 2024-11-09T05:55:04,514 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [059551c538b7,37409,1731131702988] 2024-11-09T05:55:04,514 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [059551c538b7,37661,1731131703087] 2024-11-09T05:55:04,541 INFO [RS:1;059551c538b7:37661 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T05:55:04,541 INFO [RS:2;059551c538b7:42775 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T05:55:04,541 INFO [RS:0;059551c538b7:37409 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T05:55:04,558 INFO [RS:2;059551c538b7:42775 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T05:55:04,558 INFO [RS:1;059551c538b7:37661 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T05:55:04,559 INFO [RS:0;059551c538b7:37409 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T05:55:04,564 INFO [RS:0;059551c538b7:37409 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T05:55:04,564 INFO [RS:1;059551c538b7:37661 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T05:55:04,564 INFO [RS:2;059551c538b7:42775 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T05:55:04,564 INFO [RS:1;059551c538b7:37661 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,564 INFO [RS:0;059551c538b7:37409 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,564 INFO [RS:2;059551c538b7:42775 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-09T05:55:04,565 INFO [RS:0;059551c538b7:37409 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T05:55:04,565 INFO [RS:1;059551c538b7:37661 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T05:55:04,565 INFO [RS:2;059551c538b7:42775 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T05:55:04,571 INFO [RS:1;059551c538b7:37661 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T05:55:04,571 INFO [RS:0;059551c538b7:37409 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T05:55:04,571 INFO [RS:2;059551c538b7:42775 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T05:55:04,572 INFO [RS:2;059551c538b7:42775 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,572 INFO [RS:0;059551c538b7:37409 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,572 INFO [RS:1;059551c538b7:37661 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,573 DEBUG [RS:2;059551c538b7:42775 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,573 DEBUG [RS:1;059551c538b7:37661 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,573 DEBUG [RS:0;059551c538b7:37409 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,573 DEBUG [RS:1;059551c538b7:37661 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,573 DEBUG [RS:2;059551c538b7:42775 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,573 DEBUG [RS:0;059551c538b7:37409 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,573 DEBUG [RS:1;059551c538b7:37661 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,573 DEBUG [RS:2;059551c538b7:42775 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,573 DEBUG [RS:0;059551c538b7:37409 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,573 DEBUG [RS:2;059551c538b7:42775 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,573 DEBUG [RS:1;059551c538b7:37661 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,573 DEBUG 
[RS:0;059551c538b7:37409 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,573 DEBUG [RS:2;059551c538b7:42775 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,573 DEBUG [RS:1;059551c538b7:37661 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,573 DEBUG [RS:0;059551c538b7:37409 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,573 DEBUG [RS:2;059551c538b7:42775 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/059551c538b7:0, corePoolSize=2, maxPoolSize=2 2024-11-09T05:55:04,573 DEBUG [RS:1;059551c538b7:37661 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/059551c538b7:0, corePoolSize=2, maxPoolSize=2 2024-11-09T05:55:04,573 DEBUG [RS:0;059551c538b7:37409 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/059551c538b7:0, corePoolSize=2, maxPoolSize=2 2024-11-09T05:55:04,573 DEBUG [RS:2;059551c538b7:42775 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,573 DEBUG [RS:1;059551c538b7:37661 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,574 DEBUG [RS:0;059551c538b7:37409 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,574 DEBUG [RS:1;059551c538b7:37661 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,574 DEBUG [RS:2;059551c538b7:42775 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,574 DEBUG [RS:0;059551c538b7:37409 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,574 DEBUG [RS:1;059551c538b7:37661 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,574 DEBUG [RS:2;059551c538b7:42775 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,574 DEBUG [RS:0;059551c538b7:37409 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,574 DEBUG [RS:1;059551c538b7:37661 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,574 DEBUG [RS:2;059551c538b7:42775 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,574 DEBUG [RS:0;059551c538b7:37409 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,574 DEBUG [RS:2;059551c538b7:42775 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,574 DEBUG [RS:1;059551c538b7:37661 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,574 DEBUG [RS:0;059551c538b7:37409 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,574 DEBUG [RS:2;059551c538b7:42775 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,574 DEBUG [RS:1;059551c538b7:37661 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,574 DEBUG [RS:0;059551c538b7:37409 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:04,574 DEBUG [RS:2;059551c538b7:42775 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/059551c538b7:0, corePoolSize=3, maxPoolSize=3 2024-11-09T05:55:04,574 DEBUG [RS:1;059551c538b7:37661 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/059551c538b7:0, corePoolSize=3, maxPoolSize=3 2024-11-09T05:55:04,574 DEBUG [RS:0;059551c538b7:37409 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/059551c538b7:0, corePoolSize=3, maxPoolSize=3 2024-11-09T05:55:04,574 DEBUG [RS:2;059551c538b7:42775 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0, corePoolSize=3, maxPoolSize=3 2024-11-09T05:55:04,574 DEBUG [RS:1;059551c538b7:37661 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0, corePoolSize=3, maxPoolSize=3 2024-11-09T05:55:04,574 DEBUG [RS:0;059551c538b7:37409 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0, corePoolSize=3, maxPoolSize=3 2024-11-09T05:55:04,584 INFO [RS:1;059551c538b7:37661 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,584 INFO [RS:2;059551c538b7:42775 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,584 INFO [RS:0;059551c538b7:37409 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,584 INFO [RS:2;059551c538b7:42775 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
2024-11-09T05:55:04,584 INFO [RS:1;059551c538b7:37661 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,584 INFO [RS:0;059551c538b7:37409 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,585 INFO [RS:0;059551c538b7:37409 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,585 INFO [RS:1;059551c538b7:37661 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,585 INFO [RS:2;059551c538b7:42775 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,585 INFO [RS:0;059551c538b7:37409 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,585 INFO [RS:2;059551c538b7:42775 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,585 INFO [RS:1;059551c538b7:37661 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,585 INFO [RS:2;059551c538b7:42775 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,585 INFO [RS:0;059551c538b7:37409 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,585 INFO [RS:1;059551c538b7:37661 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,585 INFO [RS:2;059551c538b7:42775 {}] hbase.ChoreService(168): Chore ScheduledChore name=059551c538b7,42775,1731131703140-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T05:55:04,585 INFO [RS:1;059551c538b7:37661 {}] hbase.ChoreService(168): Chore ScheduledChore name=059551c538b7,37661,1731131703087-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T05:55:04,585 INFO [RS:0;059551c538b7:37409 {}] hbase.ChoreService(168): Chore ScheduledChore name=059551c538b7,37409,1731131702988-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T05:55:04,596 WARN [059551c538b7:39805 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-09T05:55:04,605 INFO [RS:2;059551c538b7:42775 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T05:55:04,605 INFO [RS:1;059551c538b7:37661 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T05:55:04,608 INFO [RS:2;059551c538b7:42775 {}] hbase.ChoreService(168): Chore ScheduledChore name=059551c538b7,42775,1731131703140-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,608 INFO [RS:1;059551c538b7:37661 {}] hbase.ChoreService(168): Chore ScheduledChore name=059551c538b7,37661,1731131703087-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,608 INFO [RS:2;059551c538b7:42775 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-09T05:55:04,608 INFO [RS:1;059551c538b7:37661 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,608 INFO [RS:2;059551c538b7:42775 {}] regionserver.Replication(171): 059551c538b7,42775,1731131703140 started 2024-11-09T05:55:04,608 INFO [RS:1;059551c538b7:37661 {}] regionserver.Replication(171): 059551c538b7,37661,1731131703087 started 2024-11-09T05:55:04,611 INFO [RS:0;059551c538b7:37409 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T05:55:04,611 INFO [RS:0;059551c538b7:37409 {}] hbase.ChoreService(168): Chore ScheduledChore name=059551c538b7,37409,1731131702988-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,611 INFO [RS:0;059551c538b7:37409 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,611 INFO [RS:0;059551c538b7:37409 {}] regionserver.Replication(171): 059551c538b7,37409,1731131702988 started 2024-11-09T05:55:04,629 INFO [RS:1;059551c538b7:37661 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,630 INFO [RS:1;059551c538b7:37661 {}] regionserver.HRegionServer(1482): Serving as 059551c538b7,37661,1731131703087, RpcServer on 059551c538b7/172.17.0.2:37661, sessionid=0x1011e2974730002 2024-11-09T05:55:04,630 INFO [RS:2;059551c538b7:42775 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:04,631 INFO [RS:2;059551c538b7:42775 {}] regionserver.HRegionServer(1482): Serving as 059551c538b7,42775,1731131703140, RpcServer on 059551c538b7/172.17.0.2:42775, sessionid=0x1011e2974730003 2024-11-09T05:55:04,631 DEBUG [RS:1;059551c538b7:37661 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T05:55:04,631 DEBUG [RS:2;059551c538b7:42775 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T05:55:04,631 DEBUG [RS:1;059551c538b7:37661 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 059551c538b7,37661,1731131703087 2024-11-09T05:55:04,631 DEBUG [RS:2;059551c538b7:42775 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 059551c538b7,42775,1731131703140 2024-11-09T05:55:04,631 DEBUG [RS:1;059551c538b7:37661 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '059551c538b7,37661,1731131703087' 2024-11-09T05:55:04,631 DEBUG [RS:2;059551c538b7:42775 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '059551c538b7,42775,1731131703140' 2024-11-09T05:55:04,631 DEBUG [RS:2;059551c538b7:42775 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T05:55:04,631 DEBUG [RS:1;059551c538b7:37661 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T05:55:04,632 INFO [RS:0;059551c538b7:37409 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-09T05:55:04,632 INFO [RS:0;059551c538b7:37409 {}] regionserver.HRegionServer(1482): Serving as 059551c538b7,37409,1731131702988, RpcServer on 059551c538b7/172.17.0.2:37409, sessionid=0x1011e2974730001 2024-11-09T05:55:04,632 DEBUG [RS:0;059551c538b7:37409 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T05:55:04,632 DEBUG [RS:0;059551c538b7:37409 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 059551c538b7,37409,1731131702988 2024-11-09T05:55:04,632 DEBUG [RS:0;059551c538b7:37409 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '059551c538b7,37409,1731131702988' 2024-11-09T05:55:04,632 DEBUG [RS:0;059551c538b7:37409 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T05:55:04,632 DEBUG [RS:2;059551c538b7:42775 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T05:55:04,632 DEBUG [RS:1;059551c538b7:37661 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T05:55:04,633 DEBUG [RS:0;059551c538b7:37409 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T05:55:04,633 DEBUG [RS:2;059551c538b7:42775 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-09T05:55:04,633 DEBUG [RS:2;059551c538b7:42775 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T05:55:04,633 DEBUG [RS:1;059551c538b7:37661 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-09T05:55:04,633 DEBUG [RS:1;059551c538b7:37661 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T05:55:04,633 DEBUG [RS:2;059551c538b7:42775 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 059551c538b7,42775,1731131703140 2024-11-09T05:55:04,633 DEBUG [RS:1;059551c538b7:37661 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 059551c538b7,37661,1731131703087 2024-11-09T05:55:04,633 DEBUG [RS:2;059551c538b7:42775 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '059551c538b7,42775,1731131703140' 2024-11-09T05:55:04,633 DEBUG [RS:1;059551c538b7:37661 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '059551c538b7,37661,1731131703087' 2024-11-09T05:55:04,633 DEBUG [RS:2;059551c538b7:42775 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T05:55:04,633 DEBUG [RS:1;059551c538b7:37661 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T05:55:04,633 DEBUG [RS:0;059551c538b7:37409 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-09T05:55:04,633 DEBUG [RS:0;059551c538b7:37409 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T05:55:04,633 DEBUG [RS:0;059551c538b7:37409 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 059551c538b7,37409,1731131702988 2024-11-09T05:55:04,634 DEBUG [RS:0;059551c538b7:37409 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '059551c538b7,37409,1731131702988' 2024-11-09T05:55:04,634 DEBUG 
[RS:0;059551c538b7:37409 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T05:55:04,634 DEBUG [RS:1;059551c538b7:37661 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T05:55:04,634 DEBUG [RS:2;059551c538b7:42775 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T05:55:04,634 DEBUG [RS:0;059551c538b7:37409 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T05:55:04,634 DEBUG [RS:1;059551c538b7:37661 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T05:55:04,634 DEBUG [RS:2;059551c538b7:42775 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T05:55:04,635 INFO [RS:2;059551c538b7:42775 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T05:55:04,635 INFO [RS:1;059551c538b7:37661 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T05:55:04,635 DEBUG [RS:0;059551c538b7:37409 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T05:55:04,635 INFO [RS:0;059551c538b7:37409 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T05:55:04,635 INFO [RS:1;059551c538b7:37661 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-09T05:55:04,635 INFO [RS:2;059551c538b7:42775 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-09T05:55:04,635 INFO [RS:0;059551c538b7:37409 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
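The flush-table-proc and online-snapshot members above each check an abort znode and then watch an acquired znode for new procedures; the coordination is ordinary ZooKeeper child watches. A small sketch of that check using the plain ZooKeeper client API rather than HBase's internal ZKProcedureMemberRpcs (the quorum address and znode paths are the ones printed in this log; everything else is illustrative):

    import java.util.List;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;

    public class ProcedureZNodeSketch {
      public static void main(String[] args) throws Exception {
        // Quorum taken from the log; a real member reuses the region server's ZKWatcher session.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:57410", 30000,
            (WatchedEvent event) -> System.out.println("event: " + event));
        // "Checking for aborted procedures on node": any child here marks an aborted procedure.
        List<String> aborted = zk.getChildren("/hbase/flush-table-proc/abort", false);
        System.out.println("aborted procedures: " + aborted);
        // "Looking for new procedures under znode": set a watch so new children trigger the callback.
        List<String> acquired = zk.getChildren("/hbase/flush-table-proc/acquired", true);
        System.out.println("procedures to acquire: " + acquired);
        zk.close();
      }
    }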
2024-11-09T05:55:04,749 INFO [RS:0;059551c538b7:37409 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-09T05:55:04,749 INFO [RS:1;059551c538b7:37661 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-09T05:55:04,749 INFO [RS:2;059551c538b7:42775 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-09T05:55:04,752 INFO [RS:1;059551c538b7:37661 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=059551c538b7%2C37661%2C1731131703087, suffix=, logDir=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/WALs/059551c538b7,37661,1731131703087, archiveDir=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/oldWALs, maxLogs=32 2024-11-09T05:55:04,752 INFO [RS:0;059551c538b7:37409 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=059551c538b7%2C37409%2C1731131702988, suffix=, logDir=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/WALs/059551c538b7,37409,1731131702988, archiveDir=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/oldWALs, maxLogs=32 2024-11-09T05:55:04,752 INFO [RS:2;059551c538b7:42775 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=059551c538b7%2C42775%2C1731131703140, suffix=, logDir=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/WALs/059551c538b7,42775,1731131703140, archiveDir=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/oldWALs, maxLogs=32 2024-11-09T05:55:04,772 DEBUG [RS:0;059551c538b7:37409 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/WALs/059551c538b7,37409,1731131702988/059551c538b7%2C37409%2C1731131702988.1731131704757, exclude list is [], retry=0 2024-11-09T05:55:04,775 DEBUG [RS:1;059551c538b7:37661 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/WALs/059551c538b7,37661,1731131703087/059551c538b7%2C37661%2C1731131703087.1731131704757, exclude list is [], retry=0 2024-11-09T05:55:04,775 DEBUG [RS:2;059551c538b7:42775 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/WALs/059551c538b7,42775,1731131703140/059551c538b7%2C42775%2C1731131703140.1731131704757, exclude list is [], retry=0 2024-11-09T05:55:04,778 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34253,DS-6c263bf4-2a00-45bc-b04d-c757a9999f12,DISK] 2024-11-09T05:55:04,778 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35337,DS-6365bf3d-cdbe-4670-ada1-315216b07bfb,DISK] 2024-11-09T05:55:04,778 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:36273,DS-58dfa771-9ed5-473f-834d-9848c0b95656,DISK] 2024-11-09T05:55:04,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35337,DS-6365bf3d-cdbe-4670-ada1-315216b07bfb,DISK] 2024-11-09T05:55:04,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34253,DS-6c263bf4-2a00-45bc-b04d-c757a9999f12,DISK] 2024-11-09T05:55:04,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34253,DS-6c263bf4-2a00-45bc-b04d-c757a9999f12,DISK] 2024-11-09T05:55:04,803 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36273,DS-58dfa771-9ed5-473f-834d-9848c0b95656,DISK] 2024-11-09T05:55:04,803 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36273,DS-58dfa771-9ed5-473f-834d-9848c0b95656,DISK] 2024-11-09T05:55:04,803 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35337,DS-6365bf3d-cdbe-4670-ada1-315216b07bfb,DISK] 2024-11-09T05:55:04,810 INFO [RS:2;059551c538b7:42775 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/WALs/059551c538b7,42775,1731131703140/059551c538b7%2C42775%2C1731131703140.1731131704757 2024-11-09T05:55:04,810 INFO [RS:0;059551c538b7:37409 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/WALs/059551c538b7,37409,1731131702988/059551c538b7%2C37409%2C1731131702988.1731131704757 2024-11-09T05:55:04,810 DEBUG [RS:2;059551c538b7:42775 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33107:33107),(127.0.0.1/127.0.0.1:33935:33935),(127.0.0.1/127.0.0.1:44343:44343)] 2024-11-09T05:55:04,810 DEBUG [RS:0;059551c538b7:37409 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33935:33935),(127.0.0.1/127.0.0.1:33107:33107),(127.0.0.1/127.0.0.1:44343:44343)] 2024-11-09T05:55:04,810 INFO [RS:1;059551c538b7:37661 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/WALs/059551c538b7,37661,1731131703087/059551c538b7%2C37661%2C1731131703087.1731131704757 2024-11-09T05:55:04,811 DEBUG [RS:1;059551c538b7:37661 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44343:44343),(127.0.0.1/127.0.0.1:33107:33107),(127.0.0.1/127.0.0.1:33935:33935)] 2024-11-09T05:55:04,849 DEBUG [059551c538b7:39805 {}] assignment.AssignmentManager(2464): Processing 
assignQueue; systemServersCount=3, allServersCount=3 2024-11-09T05:55:04,861 DEBUG [059551c538b7:39805 {}] balancer.BalancerClusterState(204): Hosts are {059551c538b7=0} racks are {/default-rack=0} 2024-11-09T05:55:04,867 DEBUG [059551c538b7:39805 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-09T05:55:04,867 DEBUG [059551c538b7:39805 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-09T05:55:04,867 DEBUG [059551c538b7:39805 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-09T05:55:04,867 DEBUG [059551c538b7:39805 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-09T05:55:04,867 DEBUG [059551c538b7:39805 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-09T05:55:04,867 DEBUG [059551c538b7:39805 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-09T05:55:04,867 INFO [059551c538b7:39805 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-09T05:55:04,867 INFO [059551c538b7:39805 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-09T05:55:04,867 INFO [059551c538b7:39805 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-09T05:55:04,867 DEBUG [059551c538b7:39805 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-09T05:55:04,874 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=059551c538b7,42775,1731131703140 2024-11-09T05:55:04,880 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 059551c538b7,42775,1731131703140, state=OPENING 2024-11-09T05:55:04,932 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-09T05:55:04,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37661-0x1011e2974730002, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:04,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37409-0x1011e2974730001, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:04,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:04,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x1011e2974730003, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:04,943 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T05:55:04,943 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T05:55:04,943 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T05:55:04,943 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T05:55:04,944 DEBUG 
[PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-09T05:55:04,946 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=059551c538b7,42775,1731131703140}] 2024-11-09T05:55:05,125 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-09T05:55:05,126 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56295, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-09T05:55:05,138 INFO [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-09T05:55:05,139 INFO [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-09T05:55:05,140 INFO [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-09T05:55:05,144 INFO [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=059551c538b7%2C42775%2C1731131703140.meta, suffix=.meta, logDir=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/WALs/059551c538b7,42775,1731131703140, archiveDir=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/oldWALs, maxLogs=32 2024-11-09T05:55:05,162 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/WALs/059551c538b7,42775,1731131703140/059551c538b7%2C42775%2C1731131703140.meta.1731131705147.meta, exclude list is [], retry=0 2024-11-09T05:55:05,166 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35337,DS-6365bf3d-cdbe-4670-ada1-315216b07bfb,DISK] 2024-11-09T05:55:05,166 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34253,DS-6c263bf4-2a00-45bc-b04d-c757a9999f12,DISK] 2024-11-09T05:55:05,166 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36273,DS-58dfa771-9ed5-473f-834d-9848c0b95656,DISK] 2024-11-09T05:55:05,168 INFO [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/WALs/059551c538b7,42775,1731131703140/059551c538b7%2C42775%2C1731131703140.meta.1731131705147.meta 2024-11-09T05:55:05,169 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33107:33107),(127.0.0.1/127.0.0.1:44343:44343),(127.0.0.1/127.0.0.1:33935:33935)] 2024-11-09T05:55:05,169 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-09T05:55:05,171 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-09T05:55:05,173 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-09T05:55:05,178 INFO [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-09T05:55:05,201 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-09T05:55:05,202 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T05:55:05,203 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-09T05:55:05,203 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-09T05:55:05,207 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-09T05:55:05,209 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-09T05:55:05,209 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:05,211 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T05:55:05,211 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-09T05:55:05,213 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-09T05:55:05,213 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:05,215 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T05:55:05,215 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-09T05:55:05,217 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-09T05:55:05,217 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:05,218 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T05:55:05,218 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-09T05:55:05,220 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-09T05:55:05,220 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:05,221 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T05:55:05,224 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-09T05:55:05,226 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/hbase/meta/1588230740 2024-11-09T05:55:05,229 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/hbase/meta/1588230740 2024-11-09T05:55:05,231 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-09T05:55:05,231 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-09T05:55:05,232 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
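The FlushLargeStoresPolicy line directly above explains where the 32.0 M figure comes from: with no explicit hbase.hregion.percolumnfamilyflush.size.lower.bound in the table descriptor, the per-family lower bound falls back to the region's memstore flush size divided by its number of column families. A quick check of that arithmetic, assuming the stock 128 MB flush size and the four hbase:meta families (info, ns, rep_barrier, table) instantiated above:

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        long memstoreFlushSize = 128L * 1024 * 1024; // assumed default hbase.hregion.memstore.flush.size
        int columnFamilies = 4;                      // info, ns, rep_barrier, table
        long lowerBound = memstoreFlushSize / columnFamilies;
        // Prints 33554432, i.e. the "32.0 M" fallback and the flushSizeLowerBound=33554432
        // reported a few records below once the meta region finishes opening.
        System.out.println(lowerBound);
      }
    }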
2024-11-09T05:55:05,235 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-09T05:55:05,236 INFO [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70320595, jitterRate=0.04785852134227753}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T05:55:05,237 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-09T05:55:05,239 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731131705203Writing region info on filesystem at 1731131705204 (+1 ms)Initializing all the Stores at 1731131705206 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731131705206Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731131705206Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731131705206Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731131705207 (+1 ms)Cleaning up temporary data from old regions at 1731131705231 (+24 ms)Running coprocessor post-open hooks at 1731131705237 (+6 ms)Region opened successfully at 1731131705239 (+2 ms) 2024-11-09T05:55:05,247 INFO [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731131705115 2024-11-09T05:55:05,259 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-09T05:55:05,260 INFO [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-09T05:55:05,261 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, 
regionState=OPEN, openSeqNum=2, regionLocation=059551c538b7,42775,1731131703140 2024-11-09T05:55:05,263 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 059551c538b7,42775,1731131703140, state=OPEN 2024-11-09T05:55:05,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T05:55:05,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x1011e2974730003, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T05:55:05,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37661-0x1011e2974730002, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T05:55:05,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37409-0x1011e2974730001, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T05:55:05,273 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T05:55:05,273 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T05:55:05,273 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T05:55:05,273 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T05:55:05,273 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=059551c538b7,42775,1731131703140 2024-11-09T05:55:05,280 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-09T05:55:05,280 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=059551c538b7,42775,1731131703140 in 327 msec 2024-11-09T05:55:05,286 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-09T05:55:05,287 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 856 msec 2024-11-09T05:55:05,288 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T05:55:05,288 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-09T05:55:05,305 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-09T05:55:05,306 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is 
[region=hbase:meta,,1.1588230740, hostname=059551c538b7,42775,1731131703140, seqNum=-1] 2024-11-09T05:55:05,323 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T05:55:05,325 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59449, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-09T05:55:05,346 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1380 sec 2024-11-09T05:55:05,346 INFO [master/059551c538b7:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731131705346, completionTime=-1 2024-11-09T05:55:05,349 INFO [master/059551c538b7:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-09T05:55:05,350 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-09T05:55:05,381 INFO [master/059551c538b7:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-09T05:55:05,382 INFO [master/059551c538b7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731131765382 2024-11-09T05:55:05,382 INFO [master/059551c538b7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731131825382 2024-11-09T05:55:05,382 INFO [master/059551c538b7:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 32 msec 2024-11-09T05:55:05,384 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-09T05:55:05,391 INFO [master/059551c538b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=059551c538b7,39805,1731131702238-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:05,391 INFO [master/059551c538b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=059551c538b7,39805,1731131702238-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:05,391 INFO [master/059551c538b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=059551c538b7,39805,1731131702238-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:05,392 INFO [master/059551c538b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-059551c538b7:39805, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:05,393 INFO [master/059551c538b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:05,393 INFO [master/059551c538b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
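The ServerManager(903) line above ("expected min=3 server(s), max=3 server(s)") reflects the master's wait-on-regionservers settings, which this mini cluster pins to the number of region servers it starts. A hedged sketch of tuning those settings, assuming the hbase.master.wait.on.regionservers.* property names keep their usual spelling (the log itself does not print them):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MasterWaitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed keys: how many region servers must report in before the master finishes
        // initialization, and the cap / timeout on waiting for more.
        conf.setInt("hbase.master.wait.on.regionservers.mintostart", 3);
        conf.setInt("hbase.master.wait.on.regionservers.maxtostart", 3);
        conf.setInt("hbase.master.wait.on.regionservers.timeout", 4500);
        System.out.println(conf.getInt("hbase.master.wait.on.regionservers.mintostart", 1));
      }
    }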
2024-11-09T05:55:05,400 DEBUG [master/059551c538b7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-09T05:55:05,420 INFO [master/059551c538b7:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.167sec 2024-11-09T05:55:05,422 INFO [master/059551c538b7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-09T05:55:05,423 INFO [master/059551c538b7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-09T05:55:05,424 INFO [master/059551c538b7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-09T05:55:05,424 INFO [master/059551c538b7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-09T05:55:05,424 INFO [master/059551c538b7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-09T05:55:05,425 INFO [master/059551c538b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=059551c538b7,39805,1731131702238-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T05:55:05,426 INFO [master/059551c538b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=059551c538b7,39805,1731131702238-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-09T05:55:05,430 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-09T05:55:05,431 INFO [master/059551c538b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-09T05:55:05,432 INFO [master/059551c538b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=059551c538b7,39805,1731131702238-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
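Several of the "disabled ... Quitting" messages above are driven by boolean switches in the site configuration. A sketch of turning two of them on, with the caveat that the exact property names here are assumptions inferred from the feature names in the log, not values the log itself prints:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class FeatureSwitchSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key: quota support (MasterQuotaManager / RegionServerRpcQuotaManager above).
        conf.setBoolean("hbase.quota.enabled", true);
        // Assumed key: persist slow/large request logs to the hbase:slowlog system table.
        conf.setBoolean("hbase.regionserver.slowlog.systable.enabled", true);
        System.out.println(conf.getBoolean("hbase.quota.enabled", false));
      }
    }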
2024-11-09T05:55:05,487 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c2b8794, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T05:55:05,490 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-09T05:55:05,490 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-09T05:55:05,493 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 059551c538b7,39805,-1 for getting cluster id 2024-11-09T05:55:05,495 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-09T05:55:05,503 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6b277c1b-3513-442e-8c52-dfd123cca6a9' 2024-11-09T05:55:05,505 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-09T05:55:05,505 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6b277c1b-3513-442e-8c52-dfd123cca6a9" 2024-11-09T05:55:05,506 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62fd8ea1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T05:55:05,506 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [059551c538b7,39805,-1] 2024-11-09T05:55:05,508 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-09T05:55:05,510 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T05:55:05,511 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48890, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-09T05:55:05,513 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5260075b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T05:55:05,514 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-09T05:55:05,520 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=059551c538b7,42775,1731131703140, seqNum=-1] 2024-11-09T05:55:05,520 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T05:55:05,522 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56508, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-09T05:55:05,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=059551c538b7,39805,1731131702238 2024-11-09T05:55:05,547 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-09T05:55:05,551 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 059551c538b7,39805,1731131702238 2024-11-09T05:55:05,553 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@51614496 2024-11-09T05:55:05,554 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-09T05:55:05,556 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48894, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-09T05:55:05,562 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39805 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-09T05:55:05,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39805 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-09T05:55:05,573 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-09T05:55:05,575 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39805 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-09T05:55:05,575 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:05,578 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-09T05:55:05,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39805 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T05:55:05,586 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T05:55:05,587 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
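The HMaster$4(2454) line above is the master-side record of the client's createTable request for 'TestHBaseWalOnEC' with REGION_REPLICATION => '1' and a single 'cf' family keeping one version. The client-side equivalent, sketched with the public Admin/TableDescriptorBuilder API (connection setup is generic; only the table name, replication count and family attributes come from the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Mirrors: create 'TestHBaseWalOnEC', {REGION_REPLICATION => '1'}, {NAME => 'cf', VERSIONS => '1', ...}
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
                  .setRegionReplication(1)
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                      .setMaxVersions(1)
                      .build());
          admin.createTable(table.build());
        }
      }
    }

The Stored pid=4 / CreateTableProcedure records that follow are the procedure the master runs to satisfy this request.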
2024-11-09T05:55:05,592 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1192160228_22 at /127.0.0.1:51972 [Receiving block BP-1764202313-172.17.0.2-1731131697473:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:35337:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51972 dst: /127.0.0.1:35337 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T05:55:05,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35337 is added to blk_-9223372036854775680_1021 (size=392) 2024-11-09T05:55:05,603 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T05:55:05,606 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => acfe9f32d66f5491946d2ea668cfa50c, NAME => 'TestHBaseWalOnEC,,1731131705558.acfe9f32d66f5491946d2ea668cfa50c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e 2024-11-09T05:55:05,611 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T05:55:05,611 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
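The two "Cannot allocate parity block" warnings above (index=3 and index=4) follow directly from the cluster topology: the RS-3-2-1024k policy stripes every block group across 3 data blocks plus 2 parity blocks, so it wants 5 distinct datanodes, while this mini cluster only started 3 (the 127.0.0.1:34253, :35337 and :36273 datanodes seen in the WAL pipelines earlier). The arithmetic, spelled out:

    public class EcPlacementSketch {
      public static void main(String[] args) {
        int dataBlocks = 3;    // the "3" in RS-3-2-1024k
        int parityBlocks = 2;  // the "2" in RS-3-2-1024k
        int datanodes = 3;     // datanodes started by this mini cluster
        int required = dataBlocks + parityBlocks;            // 5 distinct nodes per block group
        int unplaceable = Math.max(0, required - datanodes);  // 2 -> parity indices 3 and 4 fail
        System.out.println("short by " + unplaceable + " datanodes per block group");
      }
    }

That shortfall is also what the later "Block group <1> failed to write 2 blocks" warnings are reporting.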
2024-11-09T05:55:05,615 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1192160228_22 at /127.0.0.1:51992 [Receiving block BP-1764202313-172.17.0.2-1731131697473:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:35337:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51992 dst: /127.0.0.1:35337 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T05:55:05,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35337 is added to blk_-9223372036854775664_1023 (size=51) 2024-11-09T05:55:05,620 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T05:55:05,620 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731131705558.acfe9f32d66f5491946d2ea668cfa50c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T05:55:05,621 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing acfe9f32d66f5491946d2ea668cfa50c, disabling compactions & flushes 2024-11-09T05:55:05,621 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731131705558.acfe9f32d66f5491946d2ea668cfa50c. 2024-11-09T05:55:05,621 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731131705558.acfe9f32d66f5491946d2ea668cfa50c. 2024-11-09T05:55:05,621 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731131705558.acfe9f32d66f5491946d2ea668cfa50c. after waiting 0 ms 2024-11-09T05:55:05,621 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731131705558.acfe9f32d66f5491946d2ea668cfa50c. 2024-11-09T05:55:05,621 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731131705558.acfe9f32d66f5491946d2ea668cfa50c. 
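Since the test revolves around writing HBase data onto an erasure-coded directory, it helps to see how an EC policy ends up on a path in the first place. A standalone sketch using the public DistributedFileSystem API; the namenode address is the one from this log, but the target path and the assumption that RS-3-2-1024k was applied this way are illustrative (the log only shows the policy is in effect):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class EcPolicySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:33319"); // namenode address from the log
        try (FileSystem fs = FileSystem.get(conf)) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          Path walRoot = new Path("/user/jenkins/test-data"); // illustrative directory
          // The policy must already be enabled cluster-side (e.g. 'hdfs ec -enablePolicy').
          // Every file created under walRoot then inherits the RS-3-2-1024k layout.
          dfs.setErasureCodingPolicy(walRoot, "RS-3-2-1024k");
          System.out.println(dfs.getErasureCodingPolicy(walRoot));
        }
      }
    }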
2024-11-09T05:55:05,621 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for acfe9f32d66f5491946d2ea668cfa50c: Waiting for close lock at 1731131705621Disabling compacts and flushes for region at 1731131705621Disabling writes for close at 1731131705621Writing region close event to WAL at 1731131705621Closed at 1731131705621 2024-11-09T05:55:05,623 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-09T05:55:05,628 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731131705558.acfe9f32d66f5491946d2ea668cfa50c.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731131705623"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731131705623"}]},"ts":"1731131705623"} 2024-11-09T05:55:05,632 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-09T05:55:05,634 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-09T05:55:05,636 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731131705634"}]},"ts":"1731131705634"} 2024-11-09T05:55:05,641 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-09T05:55:05,641 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {059551c538b7=0} racks are {/default-rack=0} 2024-11-09T05:55:05,643 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-09T05:55:05,643 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-09T05:55:05,643 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-09T05:55:05,643 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-09T05:55:05,643 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-09T05:55:05,643 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-09T05:55:05,643 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-09T05:55:05,643 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-09T05:55:05,643 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-09T05:55:05,643 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-09T05:55:05,644 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=acfe9f32d66f5491946d2ea668cfa50c, ASSIGN}] 2024-11-09T05:55:05,647 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=acfe9f32d66f5491946d2ea668cfa50c, ASSIGN 2024-11-09T05:55:05,649 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=acfe9f32d66f5491946d2ea668cfa50c, ASSIGN; state=OFFLINE, location=059551c538b7,37409,1731131702988; forceNewPlan=false, retain=false 2024-11-09T05:55:05,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39805 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T05:55:05,803 INFO [059551c538b7:39805 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-09T05:55:05,804 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=acfe9f32d66f5491946d2ea668cfa50c, regionState=OPENING, regionLocation=059551c538b7,37409,1731131702988 2024-11-09T05:55:05,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=acfe9f32d66f5491946d2ea668cfa50c, ASSIGN because future has completed 2024-11-09T05:55:05,812 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure acfe9f32d66f5491946d2ea668cfa50c, server=059551c538b7,37409,1731131702988}] 2024-11-09T05:55:05,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39805 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T05:55:05,967 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-09T05:55:05,971 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35281, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-09T05:55:05,980 INFO [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731131705558.acfe9f32d66f5491946d2ea668cfa50c. 
2024-11-09T05:55:05,981 DEBUG [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => acfe9f32d66f5491946d2ea668cfa50c, NAME => 'TestHBaseWalOnEC,,1731131705558.acfe9f32d66f5491946d2ea668cfa50c.', STARTKEY => '', ENDKEY => ''} 2024-11-09T05:55:05,981 DEBUG [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC acfe9f32d66f5491946d2ea668cfa50c 2024-11-09T05:55:05,981 DEBUG [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731131705558.acfe9f32d66f5491946d2ea668cfa50c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T05:55:05,982 DEBUG [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for acfe9f32d66f5491946d2ea668cfa50c 2024-11-09T05:55:05,982 DEBUG [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for acfe9f32d66f5491946d2ea668cfa50c 2024-11-09T05:55:05,984 INFO [StoreOpener-acfe9f32d66f5491946d2ea668cfa50c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region acfe9f32d66f5491946d2ea668cfa50c 2024-11-09T05:55:05,987 INFO [StoreOpener-acfe9f32d66f5491946d2ea668cfa50c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region acfe9f32d66f5491946d2ea668cfa50c columnFamilyName cf 2024-11-09T05:55:05,987 DEBUG [StoreOpener-acfe9f32d66f5491946d2ea668cfa50c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:05,988 INFO [StoreOpener-acfe9f32d66f5491946d2ea668cfa50c-1 {}] regionserver.HStore(327): Store=acfe9f32d66f5491946d2ea668cfa50c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T05:55:05,988 DEBUG [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for acfe9f32d66f5491946d2ea668cfa50c 2024-11-09T05:55:05,990 DEBUG [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/default/TestHBaseWalOnEC/acfe9f32d66f5491946d2ea668cfa50c 2024-11-09T05:55:05,990 DEBUG 
[RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/default/TestHBaseWalOnEC/acfe9f32d66f5491946d2ea668cfa50c 2024-11-09T05:55:05,991 DEBUG [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for acfe9f32d66f5491946d2ea668cfa50c 2024-11-09T05:55:05,991 DEBUG [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for acfe9f32d66f5491946d2ea668cfa50c 2024-11-09T05:55:05,994 DEBUG [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for acfe9f32d66f5491946d2ea668cfa50c 2024-11-09T05:55:05,999 DEBUG [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/default/TestHBaseWalOnEC/acfe9f32d66f5491946d2ea668cfa50c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T05:55:06,000 INFO [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened acfe9f32d66f5491946d2ea668cfa50c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73600426, jitterRate=0.09673181176185608}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-09T05:55:06,000 DEBUG [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for acfe9f32d66f5491946d2ea668cfa50c 2024-11-09T05:55:06,001 DEBUG [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for acfe9f32d66f5491946d2ea668cfa50c: Running coprocessor pre-open hook at 1731131705982Writing region info on filesystem at 1731131705982Initializing all the Stores at 1731131705984 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731131705984Cleaning up temporary data from old regions at 1731131705991 (+7 ms)Running coprocessor post-open hooks at 1731131706000 (+9 ms)Region opened successfully at 1731131706001 (+1 ms) 2024-11-09T05:55:06,003 INFO [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731131705558.acfe9f32d66f5491946d2ea668cfa50c., pid=6, masterSystemTime=1731131705967 2024-11-09T05:55:06,006 DEBUG [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731131705558.acfe9f32d66f5491946d2ea668cfa50c. 2024-11-09T05:55:06,006 INFO [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731131705558.acfe9f32d66f5491946d2ea668cfa50c. 
2024-11-09T05:55:06,007 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=acfe9f32d66f5491946d2ea668cfa50c, regionState=OPEN, openSeqNum=2, regionLocation=059551c538b7,37409,1731131702988 2024-11-09T05:55:06,011 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure acfe9f32d66f5491946d2ea668cfa50c, server=059551c538b7,37409,1731131702988 because future has completed 2024-11-09T05:55:06,017 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-09T05:55:06,017 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure acfe9f32d66f5491946d2ea668cfa50c, server=059551c538b7,37409,1731131702988 in 201 msec 2024-11-09T05:55:06,021 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-09T05:55:06,021 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=acfe9f32d66f5491946d2ea668cfa50c, ASSIGN in 373 msec 2024-11-09T05:55:06,022 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-09T05:55:06,022 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731131706022"}]},"ts":"1731131706022"} 2024-11-09T05:55:06,025 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-09T05:55:06,027 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-09T05:55:06,030 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 462 msec 2024-11-09T05:55:06,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39805 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T05:55:06,208 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-09T05:55:06,208 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-09T05:55:06,209 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T05:55:06,215 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-09T05:55:06,216 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T05:55:06,216 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
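The CREATE operation completing above (pid=4 CreateTableProcedure through region assignment) is what the client sees when it creates TestHBaseWalOnEC with the single column family 'cf' named in the store-opener entries that follow. A minimal Java sketch of that client call, assuming the standard HBase Admin API; the Admin instance and how it is obtained are not shown in the log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    class CreateTableSketch {
      // 'admin' is assumed to come from ConnectionFactory.createConnection(conf).getAdmin().
      static void createTestTable(Admin admin) throws Exception {
        admin.createTable(TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf")))
            .build());
      }
    }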
2024-11-09T05:55:06,226 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731131705558.acfe9f32d66f5491946d2ea668cfa50c., hostname=059551c538b7,37409,1731131702988, seqNum=2] 2024-11-09T05:55:06,228 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T05:55:06,230 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47624, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-09T05:55:06,237 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39805 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-11-09T05:55:06,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39805 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-09T05:55:06,244 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-09T05:55:06,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39805 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T05:55:06,246 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-09T05:55:06,248 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-09T05:55:06,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39805 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T05:55:06,410 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37409 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-09T05:55:06,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731131705558.acfe9f32d66f5491946d2ea668cfa50c. 
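The ClientService connection at 05:55:06,230 and the flush request at 05:55:06,237 bracket a single write to the table: the flusher below reports one cell whose key is row/cf:cq. A hedged sketch of the corresponding client calls, assuming the standard Table and Admin APIs; the cell value is not visible in the log, so "value" is a stand-in:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    class WriteAndFlushSketch {
      static void writeAndFlush(Table table, Admin admin) throws Exception {
        // One cell at row/cf:cq, matching the key reported by the flusher below.
        table.put(new Put(Bytes.toBytes("row"))
            .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
        // Drives the FlushTableProcedure (pid=7) recorded above.
        admin.flush(TableName.valueOf("TestHBaseWalOnEC"));
      }
    }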
2024-11-09T05:55:06,417 INFO [RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing acfe9f32d66f5491946d2ea668cfa50c 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-09T05:55:06,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/default/TestHBaseWalOnEC/acfe9f32d66f5491946d2ea668cfa50c/.tmp/cf/c52c61c122c645f8bdd89c7f2b963b11 is 36, key is row/cf:cq/1731131706231/Put/seqid=0 2024-11-09T05:55:06,480 WARN [RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T05:55:06,480 WARN [RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T05:55:06,485 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_774068554_22 at /127.0.0.1:60728 [Receiving block BP-1764202313-172.17.0.2-1731131697473:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:34253:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60728 dst: /127.0.0.1:34253 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T05:55:06,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34253 is added to blk_-9223372036854775648_1025 (size=4787) 2024-11-09T05:55:06,490 WARN [RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
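The parity-allocation warnings above are the expected symptom of writing under the RS-3-2-1024k policy on a mini cluster that is too small for it: each striped block group needs 3 data plus 2 parity streams, so 5 datanodes, while only three datanode addresses ever appear in this log (127.0.0.1:35337, :34253 and :36273). Parity indexes 3 and 4 therefore cannot be placed, and each block group is written "at high risk of losing data". The paired DataXceiver "Premature EOF" errors are the datanode side of the striped writer tearing down the streamers it could not use; the log itself points at 'hdfs ec -verifyClusterSetup' as the check. A minimal sketch of how such a policy is typically enabled and applied to a directory, assuming the standard Hadoop 3.x DistributedFileSystem API; the 'fs' and 'dir' arguments are illustrative, not taken from the log:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    class EcPolicySketch {
      static void applyPolicy(DistributedFileSystem fs, Path dir) throws Exception {
        // RS-3-2-1024k ships with Hadoop but is not enabled by default.
        fs.enableErasureCodingPolicy("RS-3-2-1024k");
        // New files created under 'dir' are then striped as 3 data + 2 parity cells.
        fs.setErasureCodingPolicy(dir, "RS-3-2-1024k");
      }
    }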
2024-11-09T05:55:06,490 INFO [RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/default/TestHBaseWalOnEC/acfe9f32d66f5491946d2ea668cfa50c/.tmp/cf/c52c61c122c645f8bdd89c7f2b963b11 2024-11-09T05:55:06,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/default/TestHBaseWalOnEC/acfe9f32d66f5491946d2ea668cfa50c/.tmp/cf/c52c61c122c645f8bdd89c7f2b963b11 as hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/default/TestHBaseWalOnEC/acfe9f32d66f5491946d2ea668cfa50c/cf/c52c61c122c645f8bdd89c7f2b963b11 2024-11-09T05:55:06,542 INFO [RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/default/TestHBaseWalOnEC/acfe9f32d66f5491946d2ea668cfa50c/cf/c52c61c122c645f8bdd89c7f2b963b11, entries=1, sequenceid=5, filesize=4.7 K 2024-11-09T05:55:06,549 INFO [RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for acfe9f32d66f5491946d2ea668cfa50c in 133ms, sequenceid=5, compaction requested=false 2024-11-09T05:55:06,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-11-09T05:55:06,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for acfe9f32d66f5491946d2ea668cfa50c: 2024-11-09T05:55:06,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731131705558.acfe9f32d66f5491946d2ea668cfa50c. 
2024-11-09T05:55:06,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-09T05:55:06,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39805 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-09T05:55:06,563 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-09T05:55:06,564 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 312 msec 2024-11-09T05:55:06,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39805 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T05:55:06,568 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 326 msec 2024-11-09T05:55:06,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39805 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T05:55:06,878 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-09T05:55:06,897 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-09T05:55:06,898 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-09T05:55:06,898 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at 
org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T05:55:06,903 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T05:55:06,903 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T05:55:06,903 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
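The tearDown frame at TestHBaseWalOnEC.java:101 in the call stack above funnels through HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection and then stops the HBase, DFS and ZooKeeper mini clusters, producing the shutdown sequence that follows. A sketch of that teardown hook, assuming a JUnit 4 @After method (consistent with the RunAfters frame inside the per-test chain) and a shared utility field named UTIL; the field name and hook placement are assumptions, not visible in the trace:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    class TearDownSketch {
      // Shared mini-cluster helper; in the real test this is created during setup.
      static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        UTIL.shutdownMiniCluster(); // closes connections, then the HBase/DFS/ZK mini clusters
      }
    }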
2024-11-09T05:55:06,904 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-09T05:55:06,904 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1349950594, stopped=false 2024-11-09T05:55:06,904 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=059551c538b7,39805,1731131702238 2024-11-09T05:55:06,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T05:55:06,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37661-0x1011e2974730002, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T05:55:06,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x1011e2974730003, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T05:55:06,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37409-0x1011e2974730001, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T05:55:06,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:06,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37661-0x1011e2974730002, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:06,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x1011e2974730003, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:06,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37409-0x1011e2974730001, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:06,985 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-09T05:55:06,985 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-09T05:55:06,986 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37661-0x1011e2974730002, quorum=127.0.0.1:57410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T05:55:06,986 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at 
org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T05:55:06,986 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42775-0x1011e2974730003, quorum=127.0.0.1:57410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T05:55:06,986 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T05:55:06,986 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T05:55:06,986 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37409-0x1011e2974730001, quorum=127.0.0.1:57410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T05:55:06,987 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '059551c538b7,37409,1731131702988' ***** 2024-11-09T05:55:06,988 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-09T05:55:06,988 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '059551c538b7,37661,1731131703087' ***** 2024-11-09T05:55:06,988 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-09T05:55:06,988 INFO [RS:1;059551c538b7:37661 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T05:55:06,988 INFO [RS:0;059551c538b7:37409 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T05:55:06,989 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '059551c538b7,42775,1731131703140' ***** 2024-11-09T05:55:06,989 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-09T05:55:06,989 INFO [RS:0;059551c538b7:37409 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-09T05:55:06,989 INFO [RS:1;059551c538b7:37661 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-09T05:55:06,989 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T05:55:06,989 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T05:55:06,989 INFO [RS:1;059551c538b7:37661 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-09T05:55:06,989 INFO [RS:0;059551c538b7:37409 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-09T05:55:06,989 INFO [RS:1;059551c538b7:37661 {}] regionserver.HRegionServer(959): stopping server 059551c538b7,37661,1731131703087 2024-11-09T05:55:06,990 INFO [RS:2;059551c538b7:42775 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T05:55:06,990 INFO [RS:1;059551c538b7:37661 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T05:55:06,990 INFO [RS:2;059551c538b7:42775 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-09T05:55:06,990 INFO [RS:1;059551c538b7:37661 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;059551c538b7:37661. 2024-11-09T05:55:06,990 INFO [RS:2;059551c538b7:42775 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-09T05:55:06,990 INFO [RS:2;059551c538b7:42775 {}] regionserver.HRegionServer(959): stopping server 059551c538b7,42775,1731131703140 2024-11-09T05:55:06,990 DEBUG [RS:1;059551c538b7:37661 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T05:55:06,990 INFO [RS:2;059551c538b7:42775 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T05:55:06,990 DEBUG [RS:1;059551c538b7:37661 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T05:55:06,990 INFO [RS:2;059551c538b7:42775 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;059551c538b7:42775. 2024-11-09T05:55:06,991 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T05:55:06,991 INFO [RS:1;059551c538b7:37661 {}] regionserver.HRegionServer(976): stopping server 059551c538b7,37661,1731131703087; all regions closed. 
2024-11-09T05:55:06,991 DEBUG [RS:2;059551c538b7:42775 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T05:55:06,991 INFO [RS:0;059551c538b7:37409 {}] regionserver.HRegionServer(3091): Received CLOSE for acfe9f32d66f5491946d2ea668cfa50c 2024-11-09T05:55:06,991 DEBUG [RS:2;059551c538b7:42775 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T05:55:06,991 INFO [RS:2;059551c538b7:42775 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-09T05:55:06,991 INFO [RS:2;059551c538b7:42775 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T05:55:06,992 INFO [RS:2;059551c538b7:42775 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-09T05:55:06,992 INFO [RS:2;059551c538b7:42775 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-09T05:55:06,992 INFO [RS:0;059551c538b7:37409 {}] regionserver.HRegionServer(959): stopping server 059551c538b7,37409,1731131702988 2024-11-09T05:55:06,992 INFO [RS:0;059551c538b7:37409 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T05:55:06,992 INFO [RS:0;059551c538b7:37409 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;059551c538b7:37409. 
2024-11-09T05:55:06,992 DEBUG [RS:0;059551c538b7:37409 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T05:55:06,992 INFO [RS:2;059551c538b7:42775 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-09T05:55:06,992 DEBUG [RS:0;059551c538b7:37409 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T05:55:06,992 DEBUG [RS:2;059551c538b7:42775 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-09T05:55:06,992 DEBUG [RS_CLOSE_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing acfe9f32d66f5491946d2ea668cfa50c, disabling compactions & flushes 2024-11-09T05:55:06,992 INFO [RS:0;059551c538b7:37409 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-09T05:55:06,992 DEBUG [RS:0;059551c538b7:37409 {}] regionserver.HRegionServer(1325): Online Regions={acfe9f32d66f5491946d2ea668cfa50c=TestHBaseWalOnEC,,1731131705558.acfe9f32d66f5491946d2ea668cfa50c.} 2024-11-09T05:55:06,992 DEBUG [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-09T05:55:06,992 INFO [RS_CLOSE_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731131705558.acfe9f32d66f5491946d2ea668cfa50c. 2024-11-09T05:55:06,992 DEBUG [RS_CLOSE_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731131705558.acfe9f32d66f5491946d2ea668cfa50c. 
2024-11-09T05:55:06,992 INFO [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-09T05:55:06,993 DEBUG [RS:2;059551c538b7:42775 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-09T05:55:06,993 DEBUG [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-09T05:55:06,993 DEBUG [RS:0;059551c538b7:37409 {}] regionserver.HRegionServer(1351): Waiting on acfe9f32d66f5491946d2ea668cfa50c 2024-11-09T05:55:06,993 DEBUG [RS_CLOSE_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731131705558.acfe9f32d66f5491946d2ea668cfa50c. after waiting 0 ms 2024-11-09T05:55:06,993 DEBUG [RS_CLOSE_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731131705558.acfe9f32d66f5491946d2ea668cfa50c. 2024-11-09T05:55:06,993 DEBUG [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-09T05:55:06,993 DEBUG [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-09T05:55:06,993 INFO [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-09T05:55:06,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34253 is added to blk_1073741827_1017 (size=93) 2024-11-09T05:55:06,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36273 is added to blk_1073741827_1017 (size=93) 2024-11-09T05:55:06,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35337 is added to blk_1073741827_1017 (size=93) 2024-11-09T05:55:06,998 INFO [regionserver/059551c538b7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T05:55:06,998 INFO [regionserver/059551c538b7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T05:55:06,998 INFO [regionserver/059551c538b7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T05:55:07,004 DEBUG [RS:1;059551c538b7:37661 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/oldWALs 2024-11-09T05:55:07,004 INFO [RS:1;059551c538b7:37661 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 059551c538b7%2C37661%2C1731131703087:(num 1731131704757) 2024-11-09T05:55:07,005 DEBUG [RS:1;059551c538b7:37661 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T05:55:07,005 INFO [RS:1;059551c538b7:37661 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T05:55:07,005 INFO [RS:1;059551c538b7:37661 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T05:55:07,005 INFO [RS:1;059551c538b7:37661 {}] hbase.ChoreService(370): Chore service for: regionserver/059551c538b7:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, 
ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-09T05:55:07,005 INFO [RS:1;059551c538b7:37661 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-09T05:55:07,005 INFO [regionserver/059551c538b7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-09T05:55:07,005 INFO [RS:1;059551c538b7:37661 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T05:55:07,005 INFO [RS:1;059551c538b7:37661 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-09T05:55:07,005 INFO [RS:1;059551c538b7:37661 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T05:55:07,006 INFO [RS:1;059551c538b7:37661 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37661 2024-11-09T05:55:07,017 DEBUG [RS_CLOSE_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/default/TestHBaseWalOnEC/acfe9f32d66f5491946d2ea668cfa50c/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-09T05:55:07,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37661-0x1011e2974730002, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/059551c538b7,37661,1731131703087 2024-11-09T05:55:07,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T05:55:07,020 INFO [RS:1;059551c538b7:37661 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T05:55:07,020 INFO [RS_CLOSE_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731131705558.acfe9f32d66f5491946d2ea668cfa50c. 2024-11-09T05:55:07,020 DEBUG [RS_CLOSE_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for acfe9f32d66f5491946d2ea668cfa50c: Waiting for close lock at 1731131706992Running coprocessor pre-close hooks at 1731131706992Disabling compacts and flushes for region at 1731131706992Disabling writes for close at 1731131706993 (+1 ms)Writing region close event to WAL at 1731131706997 (+4 ms)Running coprocessor post-close hooks at 1731131707018 (+21 ms)Closed at 1731131707020 (+2 ms) 2024-11-09T05:55:07,020 DEBUG [RS_CLOSE_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731131705558.acfe9f32d66f5491946d2ea668cfa50c. 2024-11-09T05:55:07,024 DEBUG [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/hbase/meta/1588230740/.tmp/info/e5f8e8142d854647ae6640da9ef6faf4 is 153, key is TestHBaseWalOnEC,,1731131705558.acfe9f32d66f5491946d2ea668cfa50c./info:regioninfo/1731131706007/Put/seqid=0 2024-11-09T05:55:07,027 WARN [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T05:55:07,027 WARN [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T05:55:07,030 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [059551c538b7,37661,1731131703087] 2024-11-09T05:55:07,031 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_559494837_22 at /127.0.0.1:60742 [Receiving block BP-1764202313-172.17.0.2-1731131697473:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:34253:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60742 dst: /127.0.0.1:34253 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T05:55:07,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34253 is added to blk_-9223372036854775632_1027 (size=6637) 2024-11-09T05:55:07,037 WARN [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-09T05:55:07,037 INFO [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/hbase/meta/1588230740/.tmp/info/e5f8e8142d854647ae6640da9ef6faf4 2024-11-09T05:55:07,040 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/059551c538b7,37661,1731131703087 already deleted, retry=false 2024-11-09T05:55:07,041 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 059551c538b7,37661,1731131703087 expired; onlineServers=2 2024-11-09T05:55:07,062 DEBUG [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/hbase/meta/1588230740/.tmp/ns/e6baa5748be24dd1b327df0f0f215116 is 43, key is default/ns:d/1731131705329/Put/seqid=0 2024-11-09T05:55:07,065 WARN [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T05:55:07,065 WARN [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T05:55:07,069 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_559494837_22 at /127.0.0.1:56902 [Receiving block BP-1764202313-172.17.0.2-1731131697473:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:36273:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56902 dst: /127.0.0.1:36273 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-09T05:55:07,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36273 is added to blk_-9223372036854775616_1029 (size=5153) 2024-11-09T05:55:07,074 WARN [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T05:55:07,074 INFO [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/hbase/meta/1588230740/.tmp/ns/e6baa5748be24dd1b327df0f0f215116 2024-11-09T05:55:07,101 DEBUG [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/hbase/meta/1588230740/.tmp/table/6b0f2efabc794d428dce5c9691a051d3 is 52, key is TestHBaseWalOnEC/table:state/1731131706022/Put/seqid=0 2024-11-09T05:55:07,104 WARN [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T05:55:07,104 WARN [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T05:55:07,107 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_559494837_22 at /127.0.0.1:60766 [Receiving block BP-1764202313-172.17.0.2-1731131697473:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:34253:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60766 dst: /127.0.0.1:34253 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T05:55:07,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34253 is added to blk_-9223372036854775600_1031 (size=5249) 2024-11-09T05:55:07,112 WARN [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T05:55:07,112 INFO [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/hbase/meta/1588230740/.tmp/table/6b0f2efabc794d428dce5c9691a051d3 2024-11-09T05:55:07,123 DEBUG [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/hbase/meta/1588230740/.tmp/info/e5f8e8142d854647ae6640da9ef6faf4 as hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/hbase/meta/1588230740/info/e5f8e8142d854647ae6640da9ef6faf4 2024-11-09T05:55:07,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37661-0x1011e2974730002, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T05:55:07,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37661-0x1011e2974730002, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T05:55:07,131 INFO [RS:1;059551c538b7:37661 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T05:55:07,132 INFO [RS:1;059551c538b7:37661 {}] regionserver.HRegionServer(1031): Exiting; stopping=059551c538b7,37661,1731131703087; zookeeper connection closed. 
2024-11-09T05:55:07,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36273 is added to blk_-9223372036854775725_1010 (size=34) 2024-11-09T05:55:07,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34253 is added to blk_-9223372036854775773_1004 (size=42) 2024-11-09T05:55:07,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35337 is added to blk_-9223372036854775772_1004 (size=42) 2024-11-09T05:55:07,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34253 is added to blk_-9223372036854775724_1010 (size=34) 2024-11-09T05:55:07,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36273 is added to blk_-9223372036854775757_1006 (size=196) 2024-11-09T05:55:07,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34253 is added to blk_-9223372036854775741_1008 (size=1189) 2024-11-09T05:55:07,138 INFO [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/hbase/meta/1588230740/info/e5f8e8142d854647ae6640da9ef6faf4, entries=10, sequenceid=11, filesize=6.5 K 2024-11-09T05:55:07,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36273 is added to blk_-9223372036854775740_1008 (size=1189) 2024-11-09T05:55:07,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35337 is added to blk_-9223372036854775756_1006 (size=196) 2024-11-09T05:55:07,140 DEBUG [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/hbase/meta/1588230740/.tmp/ns/e6baa5748be24dd1b327df0f0f215116 as hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/hbase/meta/1588230740/ns/e6baa5748be24dd1b327df0f0f215116 2024-11-09T05:55:07,141 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6b405fbd {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6b405fbd 2024-11-09T05:55:07,151 INFO [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/hbase/meta/1588230740/ns/e6baa5748be24dd1b327df0f0f215116, entries=2, sequenceid=11, filesize=5.0 K 2024-11-09T05:55:07,153 DEBUG [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/hbase/meta/1588230740/.tmp/table/6b0f2efabc794d428dce5c9691a051d3 as hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/hbase/meta/1588230740/table/6b0f2efabc794d428dce5c9691a051d3 2024-11-09T05:55:07,164 INFO [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/hbase/meta/1588230740/table/6b0f2efabc794d428dce5c9691a051d3, entries=2, sequenceid=11, filesize=5.1 K 2024-11-09T05:55:07,165 INFO [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 172ms, sequenceid=11, compaction requested=false 2024-11-09T05:55:07,165 DEBUG [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-09T05:55:07,175 DEBUG [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-09T05:55:07,176 DEBUG [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-09T05:55:07,176 INFO [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-09T05:55:07,176 DEBUG [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731131706992Running coprocessor pre-close hooks at 1731131706992Disabling compacts and flushes for region at 1731131706992Disabling writes for close at 1731131706993 (+1 ms)Obtaining lock to block concurrent updates at 1731131706993Preparing flush snapshotting stores in 1588230740 at 1731131706993Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731131706994 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731131706995 (+1 ms)Flushing 1588230740/info: creating writer at 1731131706995Flushing 1588230740/info: appending metadata at 1731131707018 (+23 ms)Flushing 1588230740/info: closing flushed file at 1731131707018Flushing 1588230740/ns: creating writer at 1731131707047 (+29 ms)Flushing 1588230740/ns: appending metadata at 1731131707062 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731131707062Flushing 1588230740/table: creating writer at 1731131707084 (+22 ms)Flushing 1588230740/table: appending metadata at 1731131707100 (+16 ms)Flushing 1588230740/table: closing flushed file at 1731131707100Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6678a91a: reopening flushed file at 1731131707121 (+21 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@d588337: reopening flushed file at 1731131707138 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e6d3a7c: reopening flushed file at 1731131707151 (+13 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 172ms, sequenceid=11, compaction requested=false at 1731131707165 (+14 ms)Writing region close event to WAL at 1731131707167 (+2 ms)Running coprocessor post-close hooks at 1731131707175 (+8 ms)Closed at 1731131707176 (+1 ms) 2024-11-09T05:55:07,176 DEBUG [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): 
Closed hbase:meta,,1.1588230740 2024-11-09T05:55:07,193 INFO [RS:0;059551c538b7:37409 {}] regionserver.HRegionServer(976): stopping server 059551c538b7,37409,1731131702988; all regions closed. 2024-11-09T05:55:07,193 INFO [RS:2;059551c538b7:42775 {}] regionserver.HRegionServer(976): stopping server 059551c538b7,42775,1731131703140; all regions closed. 2024-11-09T05:55:07,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35337 is added to blk_1073741829_1019 (size=2751) 2024-11-09T05:55:07,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34253 is added to blk_1073741829_1019 (size=2751) 2024-11-09T05:55:07,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36273 is added to blk_1073741829_1019 (size=2751) 2024-11-09T05:55:07,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36273 is added to blk_1073741826_1016 (size=1298) 2024-11-09T05:55:07,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35337 is added to blk_1073741826_1016 (size=1298) 2024-11-09T05:55:07,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34253 is added to blk_1073741826_1016 (size=1298) 2024-11-09T05:55:07,203 DEBUG [RS:2;059551c538b7:42775 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/oldWALs 2024-11-09T05:55:07,203 INFO [RS:2;059551c538b7:42775 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 059551c538b7%2C42775%2C1731131703140.meta:.meta(num 1731131705147) 2024-11-09T05:55:07,203 DEBUG [RS:0;059551c538b7:37409 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/oldWALs 2024-11-09T05:55:07,203 INFO [RS:0;059551c538b7:37409 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 059551c538b7%2C37409%2C1731131702988:(num 1731131704757) 2024-11-09T05:55:07,203 DEBUG [RS:0;059551c538b7:37409 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T05:55:07,203 INFO [RS:0;059551c538b7:37409 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T05:55:07,203 INFO [RS:0;059551c538b7:37409 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T05:55:07,204 INFO [RS:0;059551c538b7:37409 {}] hbase.ChoreService(370): Chore service for: regionserver/059551c538b7:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-09T05:55:07,204 INFO [RS:0;059551c538b7:37409 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-09T05:55:07,204 INFO [regionserver/059551c538b7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-09T05:55:07,204 INFO [RS:0;059551c538b7:37409 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T05:55:07,204 INFO [RS:0;059551c538b7:37409 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-09T05:55:07,204 INFO [RS:0;059551c538b7:37409 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T05:55:07,204 INFO [RS:0;059551c538b7:37409 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37409 2024-11-09T05:55:07,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34253 is added to blk_1073741828_1018 (size=93) 2024-11-09T05:55:07,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36273 is added to blk_1073741828_1018 (size=93) 2024-11-09T05:55:07,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35337 is added to blk_1073741828_1018 (size=93) 2024-11-09T05:55:07,209 DEBUG [RS:2;059551c538b7:42775 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/oldWALs 2024-11-09T05:55:07,209 INFO [RS:2;059551c538b7:42775 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 059551c538b7%2C42775%2C1731131703140:(num 1731131704757) 2024-11-09T05:55:07,209 DEBUG [RS:2;059551c538b7:42775 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T05:55:07,209 INFO [RS:2;059551c538b7:42775 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T05:55:07,209 INFO [RS:2;059551c538b7:42775 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T05:55:07,210 INFO [RS:2;059551c538b7:42775 {}] hbase.ChoreService(370): Chore service for: regionserver/059551c538b7:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-09T05:55:07,210 INFO [RS:2;059551c538b7:42775 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T05:55:07,210 INFO [regionserver/059551c538b7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-09T05:55:07,210 INFO [RS:2;059551c538b7:42775 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42775 2024-11-09T05:55:07,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37409-0x1011e2974730001, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/059551c538b7,37409,1731131702988 2024-11-09T05:55:07,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T05:55:07,215 INFO [RS:0;059551c538b7:37409 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T05:55:07,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x1011e2974730003, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/059551c538b7,42775,1731131703140 2024-11-09T05:55:07,226 INFO [RS:2;059551c538b7:42775 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T05:55:07,236 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [059551c538b7,42775,1731131703140] 2024-11-09T05:55:07,257 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/059551c538b7,42775,1731131703140 already deleted, retry=false 2024-11-09T05:55:07,257 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 059551c538b7,42775,1731131703140 expired; onlineServers=1 2024-11-09T05:55:07,257 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [059551c538b7,37409,1731131702988] 2024-11-09T05:55:07,268 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/059551c538b7,37409,1731131702988 already deleted, retry=false 2024-11-09T05:55:07,268 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 059551c538b7,37409,1731131702988 expired; onlineServers=0 2024-11-09T05:55:07,268 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '059551c538b7,39805,1731131702238' ***** 2024-11-09T05:55:07,268 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-09T05:55:07,268 INFO [M:0;059551c538b7:39805 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T05:55:07,268 INFO [M:0;059551c538b7:39805 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T05:55:07,268 DEBUG [M:0;059551c538b7:39805 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-09T05:55:07,269 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
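The "RegionServer ephemeral node deleted, processing expiration" lines show how the master learns of regionserver shutdown: each regionserver holds an ephemeral znode under /hbase/rs, and its disappearance triggers expiration handling. Below is a minimal standalone sketch of that watch pattern using the plain ZooKeeper client; it is not HBase's RegionServerTracker, the member znode name is hypothetical, and only the quorum address is taken from this log.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralExpirationWatch {
  public static void main(String[] args) throws Exception {
    String quorum = "127.0.0.1:57410";          // quorum printed in this log
    String memberZnode = "/hbase/rs/some-server"; // hypothetical regionserver znode
    CountDownLatch deleted = new CountDownLatch(1);

    ZooKeeper zk = new ZooKeeper(quorum, 30_000, event -> { });
    // One-shot watch: a NodeDeleted event means the ephemeral owner's session ended.
    zk.exists(memberZnode, (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
        System.out.println("server expired: " + event.getPath());
        deleted.countDown();
      }
    });
    deleted.await();   // blocks until the znode disappears
    zk.close();
  }
}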
2024-11-09T05:55:07,269 DEBUG [M:0;059551c538b7:39805 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-09T05:55:07,269 DEBUG [master/059551c538b7:0:becomeActiveMaster-HFileCleaner.small.0-1731131704296 {}] cleaner.HFileCleaner(306): Exit Thread[master/059551c538b7:0:becomeActiveMaster-HFileCleaner.small.0-1731131704296,5,FailOnTimeoutGroup] 2024-11-09T05:55:07,269 DEBUG [master/059551c538b7:0:becomeActiveMaster-HFileCleaner.large.0-1731131704295 {}] cleaner.HFileCleaner(306): Exit Thread[master/059551c538b7:0:becomeActiveMaster-HFileCleaner.large.0-1731131704295,5,FailOnTimeoutGroup] 2024-11-09T05:55:07,269 INFO [M:0;059551c538b7:39805 {}] hbase.ChoreService(370): Chore service for: master/059551c538b7:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-09T05:55:07,269 INFO [M:0;059551c538b7:39805 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T05:55:07,269 DEBUG [M:0;059551c538b7:39805 {}] master.HMaster(1795): Stopping service threads 2024-11-09T05:55:07,269 INFO [M:0;059551c538b7:39805 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-09T05:55:07,269 INFO [M:0;059551c538b7:39805 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-09T05:55:07,270 INFO [M:0;059551c538b7:39805 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-09T05:55:07,270 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-09T05:55:07,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-09T05:55:07,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:07,279 DEBUG [M:0;059551c538b7:39805 {}] zookeeper.ZKUtil(347): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-09T05:55:07,279 WARN [M:0;059551c538b7:39805 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-09T05:55:07,280 INFO [M:0;059551c538b7:39805 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/.lastflushedseqids 2024-11-09T05:55:07,289 WARN [M:0;059551c538b7:39805 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T05:55:07,289 WARN [M:0;059551c538b7:39805 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-09T05:55:07,292 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1192160228_22 at /127.0.0.1:56986 [Receiving block BP-1764202313-172.17.0.2-1731131697473:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:36273:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56986 dst: /127.0.0.1:36273 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T05:55:07,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36273 is added to blk_-9223372036854775584_1033 (size=127) 2024-11-09T05:55:07,297 WARN [M:0;059551c538b7:39805 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T05:55:07,298 INFO [M:0;059551c538b7:39805 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-09T05:55:07,298 INFO [M:0;059551c538b7:39805 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-09T05:55:07,298 DEBUG [M:0;059551c538b7:39805 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-09T05:55:07,298 INFO [M:0;059551c538b7:39805 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T05:55:07,298 DEBUG [M:0;059551c538b7:39805 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T05:55:07,298 DEBUG [M:0;059551c538b7:39805 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-09T05:55:07,298 DEBUG [M:0;059551c538b7:39805 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-09T05:55:07,298 INFO [M:0;059551c538b7:39805 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.83 KB heapSize=34.12 KB 2024-11-09T05:55:07,318 DEBUG [M:0;059551c538b7:39805 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/da58d5db7a1c4c67887b16bd04562941 is 82, key is hbase:meta,,1/info:regioninfo/1731131705261/Put/seqid=0 2024-11-09T05:55:07,320 WARN [M:0;059551c538b7:39805 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T05:55:07,320 WARN [M:0;059551c538b7:39805 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T05:55:07,323 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1192160228_22 at /127.0.0.1:52046 [Receiving block BP-1764202313-172.17.0.2-1731131697473:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:35337:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52046 dst: /127.0.0.1:35337 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T05:55:07,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35337 is added to blk_-9223372036854775568_1035 (size=5672) 2024-11-09T05:55:07,327 WARN [M:0;059551c538b7:39805 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-09T05:55:07,327 INFO [M:0;059551c538b7:39805 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/da58d5db7a1c4c67887b16bd04562941 2024-11-09T05:55:07,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37409-0x1011e2974730001, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T05:55:07,336 INFO [RS:0;059551c538b7:37409 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T05:55:07,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37409-0x1011e2974730001, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T05:55:07,337 INFO [RS:0;059551c538b7:37409 {}] regionserver.HRegionServer(1031): Exiting; stopping=059551c538b7,37409,1731131702988; zookeeper connection closed. 2024-11-09T05:55:07,337 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@63a4aba5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@63a4aba5 2024-11-09T05:55:07,347 INFO [RS:2;059551c538b7:42775 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T05:55:07,347 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x1011e2974730003, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T05:55:07,347 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x1011e2974730003, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T05:55:07,347 INFO [RS:2;059551c538b7:42775 {}] regionserver.HRegionServer(1031): Exiting; stopping=059551c538b7,42775,1731131703140; zookeeper connection closed. 2024-11-09T05:55:07,348 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@28eb1346 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@28eb1346 2024-11-09T05:55:07,348 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-09T05:55:07,352 DEBUG [M:0;059551c538b7:39805 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9763321cd70e4bfea4652c8ae05e2531 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731131706029/Put/seqid=0 2024-11-09T05:55:07,355 WARN [M:0;059551c538b7:39805 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T05:55:07,355 WARN [M:0;059551c538b7:39805 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-09T05:55:07,358 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1192160228_22 at /127.0.0.1:60804 [Receiving block BP-1764202313-172.17.0.2-1731131697473:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:34253:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60804 dst: /127.0.0.1:34253 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T05:55:07,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34253 is added to blk_-9223372036854775552_1037 (size=6439) 2024-11-09T05:55:07,363 WARN [M:0;059551c538b7:39805 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T05:55:07,363 INFO [M:0;059551c538b7:39805 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9763321cd70e4bfea4652c8ae05e2531 2024-11-09T05:55:07,391 DEBUG [M:0;059551c538b7:39805 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d248492e155f4137b2cbbbd6674d717e is 69, key is 059551c538b7,37409,1731131702988/rs:state/1731131704438/Put/seqid=0 2024-11-09T05:55:07,393 WARN [M:0;059551c538b7:39805 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T05:55:07,394 WARN [M:0;059551c538b7:39805 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-09T05:55:07,396 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1192160228_22 at /127.0.0.1:60828 [Receiving block BP-1764202313-172.17.0.2-1731131697473:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:34253:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60828 dst: /127.0.0.1:34253 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T05:55:07,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34253 is added to blk_-9223372036854775536_1039 (size=5294) 2024-11-09T05:55:07,402 WARN [M:0;059551c538b7:39805 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-09T05:55:07,402 INFO [M:0;059551c538b7:39805 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d248492e155f4137b2cbbbd6674d717e 2024-11-09T05:55:07,411 DEBUG [M:0;059551c538b7:39805 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/da58d5db7a1c4c67887b16bd04562941 as hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/da58d5db7a1c4c67887b16bd04562941 2024-11-09T05:55:07,418 INFO [M:0;059551c538b7:39805 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/da58d5db7a1c4c67887b16bd04562941, entries=8, sequenceid=72, filesize=5.5 K 2024-11-09T05:55:07,420 DEBUG [M:0;059551c538b7:39805 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9763321cd70e4bfea4652c8ae05e2531 as hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9763321cd70e4bfea4652c8ae05e2531 2024-11-09T05:55:07,428 INFO [M:0;059551c538b7:39805 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9763321cd70e4bfea4652c8ae05e2531, entries=8, sequenceid=72, filesize=6.3 K 2024-11-09T05:55:07,429 DEBUG [M:0;059551c538b7:39805 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d248492e155f4137b2cbbbd6674d717e as hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d248492e155f4137b2cbbbd6674d717e 2024-11-09T05:55:07,436 INFO [M:0;059551c538b7:39805 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d248492e155f4137b2cbbbd6674d717e, entries=3, sequenceid=72, filesize=5.2 K 2024-11-09T05:55:07,438 INFO [M:0;059551c538b7:39805 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 140ms, sequenceid=72, compaction requested=false 2024-11-09T05:55:07,439 INFO [M:0;059551c538b7:39805 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
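The flush sequence above follows a write-then-rename pattern: each column family is first flushed to a file under the region's .tmp directory ("Flushed memstore data ... to=.../.tmp/info/...") and only then published by renaming it into the family directory ("Committing .../.tmp/info/... as .../info/..."), so readers never observe a partially written store file. The following is a minimal sketch of that pattern with plain Hadoop FileSystem calls; the paths are hypothetical and this is a simplification, not HBase's HRegionFileSystem code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenRenameCommit {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path tmp = new Path("/demo/region/.tmp/info/flushfile"); // hypothetical temp location
    Path dest = new Path("/demo/region/info/flushfile");     // hypothetical final location

    // 1. Write the new file somewhere invisible to readers.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.writeBytes("flushed cells would go here\n");
    }

    // 2. Publish it with a rename, which is atomic on HDFS,
    //    so readers only ever see a complete file.
    fs.mkdirs(dest.getParent());
    if (!fs.rename(tmp, dest)) {
      throw new java.io.IOException("commit failed for " + dest);
    }
  }
}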
2024-11-09T05:55:07,439 DEBUG [M:0;059551c538b7:39805 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731131707298Disabling compacts and flushes for region at 1731131707298Disabling writes for close at 1731131707298Obtaining lock to block concurrent updates at 1731131707299 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731131707299Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27471, getHeapSize=34872, getOffHeapSize=0, getCellsCount=85 at 1731131707299Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731131707300 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731131707300Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731131707317 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731131707317Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731131707334 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731131707352 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731131707352Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731131707372 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731131707391 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731131707391Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@735982e0: reopening flushed file at 1731131707410 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@54b0530d: reopening flushed file at 1731131707419 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4b62522b: reopening flushed file at 1731131707428 (+9 ms)Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 140ms, sequenceid=72, compaction requested=false at 1731131707438 (+10 ms)Writing region close event to WAL at 1731131707439 (+1 ms)Closed at 1731131707439 2024-11-09T05:55:07,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34253 is added to blk_1073741825_1011 (size=32674) 2024-11-09T05:55:07,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35337 is added to blk_1073741825_1011 (size=32674) 2024-11-09T05:55:07,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36273 is added to blk_1073741825_1011 (size=32674) 2024-11-09T05:55:07,443 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-09T05:55:07,443 INFO [M:0;059551c538b7:39805 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-09T05:55:07,444 INFO [M:0;059551c538b7:39805 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39805 2024-11-09T05:55:07,444 INFO [M:0;059551c538b7:39805 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T05:55:07,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T05:55:07,552 INFO [M:0;059551c538b7:39805 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T05:55:07,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39805-0x1011e2974730000, quorum=127.0.0.1:57410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T05:55:07,594 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e59159d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T05:55:07,599 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T05:55:07,599 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T05:55:07,600 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T05:55:07,600 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/hadoop.log.dir/,STOPPED} 2024-11-09T05:55:07,602 WARN [BP-1764202313-172.17.0.2-1731131697473 heartbeating to localhost/127.0.0.1:33319 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-09T05:55:07,602 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-09T05:55:07,602 WARN [BP-1764202313-172.17.0.2-1731131697473 heartbeating to localhost/127.0.0.1:33319 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1764202313-172.17.0.2-1731131697473 (Datanode Uuid bd4e8063-aec1-41c5-96fe-fb36b53d9cea) service to localhost/127.0.0.1:33319 2024-11-09T05:55:07,602 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-09T05:55:07,603 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/cluster_3ac300fe-70ff-58de-25dc-ce33649a639e/data/data5/current/BP-1764202313-172.17.0.2-1731131697473 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T05:55:07,603 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/cluster_3ac300fe-70ff-58de-25dc-ce33649a639e/data/data6/current/BP-1764202313-172.17.0.2-1731131697473 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T05:55:07,604 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-09T05:55:07,606 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c6b8f01{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T05:55:07,606 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T05:55:07,606 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T05:55:07,607 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T05:55:07,607 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/hadoop.log.dir/,STOPPED} 2024-11-09T05:55:07,608 WARN [BP-1764202313-172.17.0.2-1731131697473 heartbeating to localhost/127.0.0.1:33319 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-09T05:55:07,608 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-09T05:55:07,608 WARN [BP-1764202313-172.17.0.2-1731131697473 heartbeating to localhost/127.0.0.1:33319 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1764202313-172.17.0.2-1731131697473 (Datanode Uuid 89648bbf-42d9-4464-a6c3-5eedc9c8b244) service to localhost/127.0.0.1:33319 2024-11-09T05:55:07,608 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-09T05:55:07,608 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/cluster_3ac300fe-70ff-58de-25dc-ce33649a639e/data/data3/current/BP-1764202313-172.17.0.2-1731131697473 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T05:55:07,609 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/cluster_3ac300fe-70ff-58de-25dc-ce33649a639e/data/data4/current/BP-1764202313-172.17.0.2-1731131697473 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T05:55:07,609 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-09T05:55:07,611 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4839957b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T05:55:07,611 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T05:55:07,612 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T05:55:07,612 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T05:55:07,612 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/hadoop.log.dir/,STOPPED} 2024-11-09T05:55:07,613 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-09T05:55:07,613 WARN [BP-1764202313-172.17.0.2-1731131697473 heartbeating to localhost/127.0.0.1:33319 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-09T05:55:07,613 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-09T05:55:07,613 WARN [BP-1764202313-172.17.0.2-1731131697473 heartbeating to localhost/127.0.0.1:33319 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1764202313-172.17.0.2-1731131697473 (Datanode Uuid 3e44ef00-3164-4efa-9d2f-bde330321388) service to localhost/127.0.0.1:33319 2024-11-09T05:55:07,614 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/cluster_3ac300fe-70ff-58de-25dc-ce33649a639e/data/data1/current/BP-1764202313-172.17.0.2-1731131697473 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T05:55:07,615 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/cluster_3ac300fe-70ff-58de-25dc-ce33649a639e/data/data2/current/BP-1764202313-172.17.0.2-1731131697473 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T05:55:07,615 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-09T05:55:07,622 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e4c45c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-09T05:55:07,623 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T05:55:07,623 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T05:55:07,623 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T05:55:07,623 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/hadoop.log.dir/,STOPPED} 2024-11-09T05:55:07,631 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-09T05:55:07,658 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-09T05:55:07,665 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=91 (was 161), OpenFileDescriptor=443 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=41 (was 36) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=1730 (was 1975) 2024-11-09T05:55:07,671 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=91, OpenFileDescriptor=443, MaxFileDescriptor=1048576, SystemLoadAverage=41, ProcessCount=11, AvailableMemoryMB=1730 2024-11-09T05:55:07,671 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-09T05:55:07,671 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/hadoop.log.dir so I do NOT create it in target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3 2024-11-09T05:55:07,671 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/283ff998-32df-e6c5-316d-cc8371e4dc41/hadoop.tmp.dir so I do NOT create it in target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3 2024-11-09T05:55:07,671 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/cluster_9b956a6b-9fbd-c821-5da5-7746b29aeb8b, deleteOnExit=true 2024-11-09T05:55:07,671 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-09T05:55:07,672 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/test.cache.data in system properties and HBase conf 2024-11-09T05:55:07,672 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/hadoop.tmp.dir in system properties and HBase conf 2024-11-09T05:55:07,672 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/hadoop.log.dir in system properties and HBase conf 2024-11-09T05:55:07,672 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-09T05:55:07,672 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-09T05:55:07,672 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-09T05:55:07,672 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-09T05:55:07,672 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-09T05:55:07,672 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-09T05:55:07,673 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-09T05:55:07,673 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-09T05:55:07,673 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-09T05:55:07,673 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-09T05:55:07,673 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-09T05:55:07,673 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-09T05:55:07,673 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-09T05:55:07,673 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/nfs.dump.dir in system properties and HBase conf 2024-11-09T05:55:07,673 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/java.io.tmpdir in system properties and HBase conf 2024-11-09T05:55:07,673 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-09T05:55:07,673 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-09T05:55:07,674 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-09T05:55:08,037 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T05:55:08,044 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T05:55:08,045 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T05:55:08,045 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T05:55:08,045 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T05:55:08,046 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T05:55:08,047 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16eaa68d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/hadoop.log.dir/,AVAILABLE} 2024-11-09T05:55:08,047 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62802e4c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T05:55:08,142 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3c97821d{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/java.io.tmpdir/jetty-localhost-33705-hadoop-hdfs-3_4_1-tests_jar-_-any-1225433896070987287/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-09T05:55:08,142 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@544c0dd2{HTTP/1.1, (http/1.1)}{localhost:33705} 2024-11-09T05:55:08,142 INFO [Time-limited test {}] server.Server(415): Started @12473ms 2024-11-09T05:55:08,377 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T05:55:08,381 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T05:55:08,382 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T05:55:08,382 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T05:55:08,382 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-09T05:55:08,383 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61d23bc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/hadoop.log.dir/,AVAILABLE} 2024-11-09T05:55:08,383 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@20a0e688{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T05:55:08,475 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7eeef71e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/java.io.tmpdir/jetty-localhost-35295-hadoop-hdfs-3_4_1-tests_jar-_-any-10026057108205513442/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T05:55:08,475 
INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@70feba44{HTTP/1.1, (http/1.1)}{localhost:35295} 2024-11-09T05:55:08,475 INFO [Time-limited test {}] server.Server(415): Started @12806ms 2024-11-09T05:55:08,477 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-09T05:55:08,508 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T05:55:08,511 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T05:55:08,511 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T05:55:08,511 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T05:55:08,511 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T05:55:08,512 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4e5afbc4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/hadoop.log.dir/,AVAILABLE} 2024-11-09T05:55:08,513 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b0441b5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T05:55:08,606 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@30bdc6f8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/java.io.tmpdir/jetty-localhost-38801-hadoop-hdfs-3_4_1-tests_jar-_-any-13632997978503857271/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T05:55:08,606 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5547eae9{HTTP/1.1, (http/1.1)}{localhost:38801} 2024-11-09T05:55:08,606 INFO [Time-limited test {}] server.Server(415): Started @12937ms 2024-11-09T05:55:08,608 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-09T05:55:08,637 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T05:55:08,640 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T05:55:08,641 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T05:55:08,641 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T05:55:08,641 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T05:55:08,641 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c77de1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/hadoop.log.dir/,AVAILABLE} 2024-11-09T05:55:08,642 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@438bc7ce{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T05:55:08,738 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@44672b71{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/java.io.tmpdir/jetty-localhost-35823-hadoop-hdfs-3_4_1-tests_jar-_-any-5392761654935965389/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T05:55:08,738 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@36a9ca95{HTTP/1.1, (http/1.1)}{localhost:35823} 2024-11-09T05:55:08,738 INFO [Time-limited test {}] server.Server(415): Started @13069ms 2024-11-09T05:55:08,741 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-09T05:55:09,716 WARN [Thread-558 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/cluster_9b956a6b-9fbd-c821-5da5-7746b29aeb8b/data/data1/current/BP-1438717172-172.17.0.2-1731131707697/current, will proceed with Du for space computation calculation, 2024-11-09T05:55:09,716 WARN [Thread-559 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/cluster_9b956a6b-9fbd-c821-5da5-7746b29aeb8b/data/data2/current/BP-1438717172-172.17.0.2-1731131707697/current, will proceed with Du for space computation calculation, 2024-11-09T05:55:09,735 WARN [Thread-499 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-09T05:55:09,737 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2e0933451585fa85 with lease ID 0xefa27222f4faf4c4: Processing first storage report for DS-897b034c-0117-4605-bc7f-4791782ced1d from datanode DatanodeRegistration(127.0.0.1:40361, datanodeUuid=bb31c275-e95e-4611-813b-ae8895b4b747, infoPort=41501, infoSecurePort=0, ipcPort=42707, storageInfo=lv=-57;cid=testClusterID;nsid=1551007513;c=1731131707697) 2024-11-09T05:55:09,738 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2e0933451585fa85 with lease ID 0xefa27222f4faf4c4: from storage DS-897b034c-0117-4605-bc7f-4791782ced1d node DatanodeRegistration(127.0.0.1:40361, datanodeUuid=bb31c275-e95e-4611-813b-ae8895b4b747, infoPort=41501, infoSecurePort=0, ipcPort=42707, storageInfo=lv=-57;cid=testClusterID;nsid=1551007513;c=1731131707697), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T05:55:09,738 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2e0933451585fa85 with lease ID 0xefa27222f4faf4c4: Processing first storage report for DS-e0ecc028-702c-4925-a167-27b573e86b1c from datanode DatanodeRegistration(127.0.0.1:40361, datanodeUuid=bb31c275-e95e-4611-813b-ae8895b4b747, infoPort=41501, infoSecurePort=0, ipcPort=42707, storageInfo=lv=-57;cid=testClusterID;nsid=1551007513;c=1731131707697) 2024-11-09T05:55:09,738 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2e0933451585fa85 with lease ID 0xefa27222f4faf4c4: from storage DS-e0ecc028-702c-4925-a167-27b573e86b1c node DatanodeRegistration(127.0.0.1:40361, datanodeUuid=bb31c275-e95e-4611-813b-ae8895b4b747, infoPort=41501, infoSecurePort=0, ipcPort=42707, storageInfo=lv=-57;cid=testClusterID;nsid=1551007513;c=1731131707697), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T05:55:09,910 WARN [Thread-570 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/cluster_9b956a6b-9fbd-c821-5da5-7746b29aeb8b/data/data3/current/BP-1438717172-172.17.0.2-1731131707697/current, will proceed with Du for space computation calculation, 2024-11-09T05:55:09,911 WARN [Thread-571 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/cluster_9b956a6b-9fbd-c821-5da5-7746b29aeb8b/data/data4/current/BP-1438717172-172.17.0.2-1731131707697/current, will proceed with Du for space computation calculation, 2024-11-09T05:55:09,926 WARN [Thread-522 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-09T05:55:09,928 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x532c23c0d847f508 with lease ID 0xefa27222f4faf4c5: Processing first storage report for DS-571a33a1-70bc-4016-8dae-1908732a8413 from datanode DatanodeRegistration(127.0.0.1:45033, datanodeUuid=cd798d89-dac4-4c4f-8157-ce99c2b505f5, infoPort=43797, infoSecurePort=0, ipcPort=46097, storageInfo=lv=-57;cid=testClusterID;nsid=1551007513;c=1731131707697) 2024-11-09T05:55:09,928 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x532c23c0d847f508 with lease ID 0xefa27222f4faf4c5: from storage DS-571a33a1-70bc-4016-8dae-1908732a8413 node DatanodeRegistration(127.0.0.1:45033, datanodeUuid=cd798d89-dac4-4c4f-8157-ce99c2b505f5, infoPort=43797, infoSecurePort=0, ipcPort=46097, storageInfo=lv=-57;cid=testClusterID;nsid=1551007513;c=1731131707697), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T05:55:09,928 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x532c23c0d847f508 with lease ID 0xefa27222f4faf4c5: Processing first storage report for DS-e3f08142-1591-45db-b016-db4c710779c9 from datanode DatanodeRegistration(127.0.0.1:45033, datanodeUuid=cd798d89-dac4-4c4f-8157-ce99c2b505f5, infoPort=43797, infoSecurePort=0, ipcPort=46097, storageInfo=lv=-57;cid=testClusterID;nsid=1551007513;c=1731131707697) 2024-11-09T05:55:09,928 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x532c23c0d847f508 with lease ID 0xefa27222f4faf4c5: from storage DS-e3f08142-1591-45db-b016-db4c710779c9 node DatanodeRegistration(127.0.0.1:45033, datanodeUuid=cd798d89-dac4-4c4f-8157-ce99c2b505f5, infoPort=43797, infoSecurePort=0, ipcPort=46097, storageInfo=lv=-57;cid=testClusterID;nsid=1551007513;c=1731131707697), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T05:55:10,037 WARN [Thread-581 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/cluster_9b956a6b-9fbd-c821-5da5-7746b29aeb8b/data/data5/current/BP-1438717172-172.17.0.2-1731131707697/current, will proceed with Du for space computation calculation, 2024-11-09T05:55:10,037 WARN [Thread-582 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/cluster_9b956a6b-9fbd-c821-5da5-7746b29aeb8b/data/data6/current/BP-1438717172-172.17.0.2-1731131707697/current, will proceed with Du for space computation calculation, 2024-11-09T05:55:10,053 WARN [Thread-544 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-09T05:55:10,057 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xad8c9e881c260e45 with lease ID 0xefa27222f4faf4c6: Processing first storage report for DS-84663e74-52df-4315-b21d-8713e0099a6a from datanode DatanodeRegistration(127.0.0.1:35887, datanodeUuid=bf9af889-d903-4c20-b191-9e031457ed69, infoPort=36907, infoSecurePort=0, ipcPort=44177, storageInfo=lv=-57;cid=testClusterID;nsid=1551007513;c=1731131707697) 2024-11-09T05:55:10,057 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xad8c9e881c260e45 with lease ID 0xefa27222f4faf4c6: from storage DS-84663e74-52df-4315-b21d-8713e0099a6a node DatanodeRegistration(127.0.0.1:35887, datanodeUuid=bf9af889-d903-4c20-b191-9e031457ed69, infoPort=36907, infoSecurePort=0, ipcPort=44177, storageInfo=lv=-57;cid=testClusterID;nsid=1551007513;c=1731131707697), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T05:55:10,057 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xad8c9e881c260e45 with lease ID 0xefa27222f4faf4c6: Processing first storage report for DS-e0748b9b-3487-43e8-885d-0c672b05cb72 from datanode DatanodeRegistration(127.0.0.1:35887, datanodeUuid=bf9af889-d903-4c20-b191-9e031457ed69, infoPort=36907, infoSecurePort=0, ipcPort=44177, storageInfo=lv=-57;cid=testClusterID;nsid=1551007513;c=1731131707697) 2024-11-09T05:55:10,057 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xad8c9e881c260e45 with lease ID 0xefa27222f4faf4c6: from storage DS-e0748b9b-3487-43e8-885d-0c672b05cb72 node DatanodeRegistration(127.0.0.1:35887, datanodeUuid=bf9af889-d903-4c20-b191-9e031457ed69, infoPort=36907, infoSecurePort=0, ipcPort=44177, storageInfo=lv=-57;cid=testClusterID;nsid=1551007513;c=1731131707697), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T05:55:10,090 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3 2024-11-09T05:55:10,093 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/cluster_9b956a6b-9fbd-c821-5da5-7746b29aeb8b/zookeeper_0, clientPort=58997, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/cluster_9b956a6b-9fbd-c821-5da5-7746b29aeb8b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/cluster_9b956a6b-9fbd-c821-5da5-7746b29aeb8b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-09T05:55:10,095 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58997 2024-11-09T05:55:10,095 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
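[Editor's note] The entries above record HBaseTestingUtil tearing down the mini-cluster from testReadWrite[0] and bringing up a fresh one for testReadWrite[1]: three data nodes report their storages, a MiniZooKeeperCluster comes up on client port 58997, and the test-data directories are wired into the HBase/Hadoop configuration. A minimal driver sketch of that same start-up path follows; it assumes the HBase 3 test API names that appear in the log (HBaseTestingUtil, StartMiniClusterOption) and uses a hypothetical class name, so treat it as an illustration rather than the test's actual code.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    // Hypothetical driver mirroring the options the log prints:
    // numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1.
    public class MiniClusterStartupSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(3)
            .numDataNodes(3)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option);   // produces the "Starting up minicluster with option: ..." entry
        try {
          // test body (e.g. WAL read/write assertions) would run here
        } finally {
          util.shutdownMiniCluster();    // produces the "Minicluster is down" entry
        }
      }
    }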
2024-11-09T05:55:10,097 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T05:55:10,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741825_1001 (size=7) 2024-11-09T05:55:10,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741825_1001 (size=7) 2024-11-09T05:55:10,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45033 is added to blk_1073741825_1001 (size=7) 2024-11-09T05:55:10,114 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb with version=8 2024-11-09T05:55:10,114 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33319/user/jenkins/test-data/3e39239f-3a6f-6a53-0033-2ffd66e44b7e/hbase-staging 2024-11-09T05:55:10,117 INFO [Time-limited test {}] client.ConnectionUtils(128): master/059551c538b7:0 server-side Connection retries=45 2024-11-09T05:55:10,117 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T05:55:10,117 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T05:55:10,117 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T05:55:10,117 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T05:55:10,117 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T05:55:10,117 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-09T05:55:10,117 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T05:55:10,118 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39443 2024-11-09T05:55:10,120 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39443 connecting to ZooKeeper ensemble=127.0.0.1:58997 2024-11-09T05:55:10,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:394430x0, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T05:55:10,169 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39443-0x1011e2996380000 connected 2024-11-09T05:55:10,263 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to 
namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T05:55:10,266 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T05:55:10,272 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T05:55:10,272 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb, hbase.cluster.distributed=false 2024-11-09T05:55:10,275 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T05:55:10,276 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39443 2024-11-09T05:55:10,276 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39443 2024-11-09T05:55:10,277 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39443 2024-11-09T05:55:10,277 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39443 2024-11-09T05:55:10,277 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39443 2024-11-09T05:55:10,291 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/059551c538b7:0 server-side Connection retries=45 2024-11-09T05:55:10,291 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T05:55:10,291 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T05:55:10,291 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T05:55:10,291 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T05:55:10,291 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T05:55:10,291 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T05:55:10,291 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T05:55:10,292 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39265 
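[Editor's note] At this point the master has bound its NettyRpcServer on 172.17.0.2:39443 and registered with the ZooKeeper ensemble at 127.0.0.1:58997 exposed by the mini-ZK cluster. For orientation only, a client-side sketch of pointing an HBase connection at that same ensemble is shown below; the quorum and client-port values are taken from the log, but the test itself does not necessarily open a connection this way.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ConnectToMiniClusterSketch {
      public static void main(String[] args) throws Exception {
        // Quorum and client port as logged by MiniZooKeeperCluster ("client port=58997").
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 58997);
        try (Connection connection = ConnectionFactory.createConnection(conf)) {
          // Admin and Table handles would be obtained from this connection.
          System.out.println("connected: " + !connection.isClosed());
        }
      }
    }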
2024-11-09T05:55:10,293 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39265 connecting to ZooKeeper ensemble=127.0.0.1:58997 2024-11-09T05:55:10,294 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T05:55:10,296 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T05:55:10,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:392650x0, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T05:55:10,310 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39265-0x1011e2996380001, quorum=127.0.0.1:58997, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T05:55:10,310 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39265-0x1011e2996380001 connected 2024-11-09T05:55:10,311 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T05:55:10,312 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T05:55:10,313 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39265-0x1011e2996380001, quorum=127.0.0.1:58997, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-09T05:55:10,314 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39265-0x1011e2996380001, quorum=127.0.0.1:58997, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T05:55:10,315 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39265 2024-11-09T05:55:10,315 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39265 2024-11-09T05:55:10,316 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39265 2024-11-09T05:55:10,316 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39265 2024-11-09T05:55:10,317 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39265 2024-11-09T05:55:10,337 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/059551c538b7:0 server-side Connection retries=45 2024-11-09T05:55:10,338 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T05:55:10,338 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T05:55:10,338 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T05:55:10,338 INFO 
[Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T05:55:10,338 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T05:55:10,338 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T05:55:10,338 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T05:55:10,339 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38049 2024-11-09T05:55:10,340 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38049 connecting to ZooKeeper ensemble=127.0.0.1:58997 2024-11-09T05:55:10,341 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T05:55:10,342 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T05:55:10,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:380490x0, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T05:55:10,357 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38049-0x1011e2996380002 connected 2024-11-09T05:55:10,357 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38049-0x1011e2996380002, quorum=127.0.0.1:58997, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T05:55:10,357 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T05:55:10,358 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T05:55:10,359 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38049-0x1011e2996380002, quorum=127.0.0.1:58997, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-09T05:55:10,361 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38049-0x1011e2996380002, quorum=127.0.0.1:58997, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T05:55:10,361 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38049 2024-11-09T05:55:10,362 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38049 2024-11-09T05:55:10,362 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38049 2024-11-09T05:55:10,363 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38049 2024-11-09T05:55:10,363 DEBUG 
[Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38049 2024-11-09T05:55:10,380 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/059551c538b7:0 server-side Connection retries=45 2024-11-09T05:55:10,380 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T05:55:10,381 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T05:55:10,381 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T05:55:10,381 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T05:55:10,381 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T05:55:10,381 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T05:55:10,381 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T05:55:10,381 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33005 2024-11-09T05:55:10,383 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33005 connecting to ZooKeeper ensemble=127.0.0.1:58997 2024-11-09T05:55:10,383 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T05:55:10,385 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T05:55:10,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:330050x0, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T05:55:10,399 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:330050x0, quorum=127.0.0.1:58997, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T05:55:10,399 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33005-0x1011e2996380003 connected 2024-11-09T05:55:10,399 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T05:55:10,400 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T05:55:10,401 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33005-0x1011e2996380003, quorum=127.0.0.1:58997, baseZNode=/hbase Set watcher on znode that does not yet exist, 
/hbase/master 2024-11-09T05:55:10,402 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33005-0x1011e2996380003, quorum=127.0.0.1:58997, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T05:55:10,403 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33005 2024-11-09T05:55:10,403 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33005 2024-11-09T05:55:10,404 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33005 2024-11-09T05:55:10,404 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33005 2024-11-09T05:55:10,405 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33005 2024-11-09T05:55:10,417 DEBUG [M:0;059551c538b7:39443 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;059551c538b7:39443 2024-11-09T05:55:10,418 INFO [master/059551c538b7:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/059551c538b7,39443,1731131710116 2024-11-09T05:55:10,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x1011e2996380002, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T05:55:10,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x1011e2996380003, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T05:55:10,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39265-0x1011e2996380001, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T05:55:10,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T05:55:10,431 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/059551c538b7,39443,1731131710116 2024-11-09T05:55:10,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x1011e2996380003, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T05:55:10,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x1011e2996380002, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T05:55:10,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:10,440 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:39265-0x1011e2996380001, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T05:55:10,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x1011e2996380003, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:10,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39265-0x1011e2996380001, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:10,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x1011e2996380002, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:10,442 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-09T05:55:10,443 INFO [master/059551c538b7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/059551c538b7,39443,1731131710116 from backup master directory 2024-11-09T05:55:10,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/059551c538b7,39443,1731131710116 2024-11-09T05:55:10,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39265-0x1011e2996380001, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T05:55:10,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x1011e2996380003, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T05:55:10,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T05:55:10,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x1011e2996380002, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T05:55:10,452 WARN [master/059551c538b7:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
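[Editor's note] The becomeActiveMaster thread above first publishes itself under /hbase/backup-masters, wins the /hbase/master znode, then deletes its backup-master entry; each region server watcher sees the matching NodeCreated/NodeChildrenChanged events. Purely for illustration, a plain ZooKeeper read of those znodes against the same ensemble could look like the sketch below (hypothetical utility, not part of the test; the /hbase/master payload is HBase's internally serialized ServerName, so only its size is reported here).

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ActiveMasterZNodeSketch {
      public static void main(String[] args) throws Exception {
        // Ensemble and base znode as logged: quorum=127.0.0.1:58997, baseZNode=/hbase.
        Watcher noop = (WatchedEvent event) -> { };
        ZooKeeper zk = new ZooKeeper("127.0.0.1:58997", 30_000, noop);
        try {
          byte[] data = zk.getData("/hbase/master", false, null);
          System.out.println("/hbase/master holds " + data.length + " bytes");
          System.out.println("backup masters: " + zk.getChildren("/hbase/backup-masters", false));
        } finally {
          zk.close();
        }
      }
    }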
2024-11-09T05:55:10,452 INFO [master/059551c538b7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=059551c538b7,39443,1731131710116 2024-11-09T05:55:10,462 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/hbase.id] with ID: bf9c1675-8b1a-48f2-90b2-3d9bf10700c0 2024-11-09T05:55:10,462 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/.tmp/hbase.id 2024-11-09T05:55:10,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45033 is added to blk_1073741826_1002 (size=42) 2024-11-09T05:55:10,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741826_1002 (size=42) 2024-11-09T05:55:10,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741826_1002 (size=42) 2024-11-09T05:55:10,472 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/.tmp/hbase.id]:[hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/hbase.id] 2024-11-09T05:55:10,489 INFO [master/059551c538b7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T05:55:10,489 INFO [master/059551c538b7:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-09T05:55:10,490 INFO [master/059551c538b7:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
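[Editor's note] The cluster ID (bf9c1675-8b1a-48f2-90b2-3d9bf10700c0) is published with a write-to-temporary-then-rename pattern, so readers never observe a partially written hbase.id. A generic sketch of that pattern with the Hadoop FileSystem API follows; the paths are illustrative stand-ins, the real ones being the hdfs://localhost:46509/user/jenkins/test-data/... locations in the log.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdWriteSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();   // would carry fs.defaultFS=hdfs://localhost:46509
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/user/jenkins/test-data/EXAMPLE/.tmp/hbase.id");   // illustrative path
        Path target = new Path("/user/jenkins/test-data/EXAMPLE/hbase.id");
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("bf9c1675-8b1a-48f2-90b2-3d9bf10700c0".getBytes(StandardCharsets.UTF_8));
        }
        // Rename is atomic within one HDFS namespace: readers see either no file or the complete ID.
        if (!fs.rename(tmp, target)) {
          throw new IOException("could not move " + tmp + " to " + target);
        }
      }
    }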
2024-11-09T05:55:10,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x1011e2996380002, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:10,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x1011e2996380003, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:10,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39265-0x1011e2996380001, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:10,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:10,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741827_1003 (size=196) 2024-11-09T05:55:10,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741827_1003 (size=196) 2024-11-09T05:55:10,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45033 is added to blk_1073741827_1003 (size=196) 2024-11-09T05:55:10,515 INFO [master/059551c538b7:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-09T05:55:10,516 INFO [master/059551c538b7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-09T05:55:10,516 INFO [master/059551c538b7:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-09T05:55:10,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is 
added to blk_1073741828_1004 (size=1189) 2024-11-09T05:55:10,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741828_1004 (size=1189) 2024-11-09T05:55:10,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45033 is added to blk_1073741828_1004 (size=1189) 2024-11-09T05:55:10,530 INFO [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/MasterData/data/master/store 2024-11-09T05:55:10,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741829_1005 (size=34) 2024-11-09T05:55:10,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741829_1005 (size=34) 2024-11-09T05:55:10,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45033 is added to blk_1073741829_1005 (size=34) 2024-11-09T05:55:10,540 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T05:55:10,540 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-09T05:55:10,540 INFO [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T05:55:10,540 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
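The 'master:store' descriptor printed above (column families info, proc, rs and state, each with VERSIONS, IN_MEMORY, BLOOMFILTER, DATA_BLOCK_ENCODING and BLOCKSIZE attributes) uses the same attribute model as any user table. As a hedged illustration only, not the MasterRegion code path, an equivalent descriptor for two of those families could be assembled with the public HBase client builders:

    // Hypothetical sketch: rebuilding the kind of descriptor logged for 'master:store'.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static TableDescriptor build() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                               // VERSIONS => '3'
            .setInMemory(true)                               // IN_MEMORY => 'true'
            .setBloomFilterType(BloomType.ROWCOL)            // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)                          // BLOCKSIZE => 8 KB
            .build();
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024)                         // BLOCKSIZE => 64 KB
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))  // namespace:qualifier as logged
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .build();
      }
    }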
2024-11-09T05:55:10,541 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-09T05:55:10,541 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T05:55:10,541 INFO [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T05:55:10,541 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731131710540Disabling compacts and flushes for region at 1731131710540Disabling writes for close at 1731131710541 (+1 ms)Writing region close event to WAL at 1731131710541Closed at 1731131710541 2024-11-09T05:55:10,542 WARN [master/059551c538b7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/MasterData/data/master/store/.initializing 2024-11-09T05:55:10,542 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/MasterData/WALs/059551c538b7,39443,1731131710116 2024-11-09T05:55:10,546 INFO [master/059551c538b7:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=059551c538b7%2C39443%2C1731131710116, suffix=, logDir=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/MasterData/WALs/059551c538b7,39443,1731131710116, archiveDir=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/MasterData/oldWALs, maxLogs=10 2024-11-09T05:55:10,547 INFO [master/059551c538b7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 059551c538b7%2C39443%2C1731131710116.1731131710547 2024-11-09T05:55:10,557 INFO [master/059551c538b7:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/MasterData/WALs/059551c538b7,39443,1731131710116/059551c538b7%2C39443%2C1731131710116.1731131710547 2024-11-09T05:55:10,559 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36907:36907),(127.0.0.1/127.0.0.1:43797:43797),(127.0.0.1/127.0.0.1:41501:41501)] 2024-11-09T05:55:10,561 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-09T05:55:10,561 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T05:55:10,561 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T05:55:10,561 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T05:55:10,563 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T05:55:10,564 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-09T05:55:10,564 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:10,565 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T05:55:10,565 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T05:55:10,567 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-09T05:55:10,567 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:10,567 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T05:55:10,567 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T05:55:10,570 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-09T05:55:10,570 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:10,571 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T05:55:10,571 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T05:55:10,573 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-09T05:55:10,573 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:10,574 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T05:55:10,574 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T05:55:10,575 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-09T05:55:10,575 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-09T05:55:10,577 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T05:55:10,577 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T05:55:10,578 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-09T05:55:10,579 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T05:55:10,582 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T05:55:10,582 INFO [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64525370, jitterRate=-0.03849706053733826}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T05:55:10,583 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731131710561Initializing all the Stores at 1731131710562 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731131710562Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731131710562Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731131710562Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731131710562Cleaning up temporary data from old regions at 1731131710577 (+15 ms)Region opened successfully at 1731131710583 (+6 ms) 2024-11-09T05:55:10,584 INFO [master/059551c538b7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-09T05:55:10,589 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@575d7f0c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=059551c538b7/172.17.0.2:0 2024-11-09T05:55:10,590 INFO [master/059551c538b7:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-09T05:55:10,590 INFO [master/059551c538b7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-09T05:55:10,590 INFO [master/059551c538b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-09T05:55:10,590 INFO [master/059551c538b7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-09T05:55:10,591 INFO [master/059551c538b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-09T05:55:10,591 INFO [master/059551c538b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-09T05:55:10,591 INFO [master/059551c538b7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-09T05:55:10,594 INFO [master/059551c538b7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
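One sanity check on the numbers above: the "32.0 M" lower bound that FlushLargeStoresPolicy falls back to is simply the region memstore flush size divided by the number of column families, because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set for master:store. With a 134217728-byte (128 MB) flush size and four families (info, proc, rs, state), 134217728 / 4 = 33554432 bytes = 32 MiB, which is consistent with the FlushLargeStoresPolicy{flushSizeLowerBound=33554432} reported when the region opened.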
2024-11-09T05:55:10,595 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-09T05:55:10,604 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-09T05:55:10,605 INFO [master/059551c538b7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-09T05:55:10,606 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-09T05:55:10,615 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-09T05:55:10,615 INFO [master/059551c538b7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-09T05:55:10,617 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-09T05:55:10,625 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-09T05:55:10,627 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-09T05:55:10,636 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-09T05:55:10,638 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-09T05:55:10,651 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-09T05:55:10,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x1011e2996380002, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T05:55:10,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T05:55:10,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x1011e2996380003, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T05:55:10,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39265-0x1011e2996380001, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-11-09T05:55:10,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:10,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x1011e2996380002, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:10,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x1011e2996380003, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:10,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39265-0x1011e2996380001, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:10,662 INFO [master/059551c538b7:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=059551c538b7,39443,1731131710116, sessionid=0x1011e2996380000, setting cluster-up flag (Was=false) 2024-11-09T05:55:10,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:10,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x1011e2996380002, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:10,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39265-0x1011e2996380001, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:10,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x1011e2996380003, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:10,709 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-09T05:55:10,714 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-09T05:55:10,717 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=059551c538b7,39443,1731131710116 2024-11-09T05:55:10,722 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-09T05:55:10,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x1011e2996380002, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:10,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:10,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x1011e2996380003, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:10,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39265-0x1011e2996380001, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:10,763 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-09T05:55:10,764 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-09T05:55:10,773 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-09T05:55:10,774 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=059551c538b7,39443,1731131710116 2024-11-09T05:55:10,776 INFO [master/059551c538b7:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-09T05:55:10,778 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-09T05:55:10,778 INFO [master/059551c538b7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-09T05:55:10,778 INFO [master/059551c538b7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
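The StochasticLoadBalancer line above echoes its effective tuning (maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000) together with the list of cost functions it will score. As a hedged sketch, these values map to the usual hbase.master.balancer.stochastic.* configuration keys; the key names below are quoted from memory and should be verified against the HBase version in use:

    // Hypothetical sketch: supplying the balancer knobs reported in the log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerConfigSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
        conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30_000); // milliseconds
        return conf;
      }
    }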
2024-11-09T05:55:10,779 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 059551c538b7,39443,1731131710116 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-09T05:55:10,781 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/059551c538b7:0, corePoolSize=5, maxPoolSize=5 2024-11-09T05:55:10,781 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/059551c538b7:0, corePoolSize=5, maxPoolSize=5 2024-11-09T05:55:10,781 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/059551c538b7:0, corePoolSize=5, maxPoolSize=5 2024-11-09T05:55:10,781 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/059551c538b7:0, corePoolSize=5, maxPoolSize=5 2024-11-09T05:55:10,781 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/059551c538b7:0, corePoolSize=10, maxPoolSize=10 2024-11-09T05:55:10,781 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,781 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/059551c538b7:0, corePoolSize=2, maxPoolSize=2 2024-11-09T05:55:10,781 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,783 INFO [master/059551c538b7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731131740783 2024-11-09T05:55:10,783 INFO [master/059551c538b7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-09T05:55:10,783 INFO [master/059551c538b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-09T05:55:10,783 INFO [master/059551c538b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-09T05:55:10,783 INFO [master/059551c538b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-09T05:55:10,783 INFO [master/059551c538b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-09T05:55:10,783 INFO [master/059551c538b7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-09T05:55:10,784 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T05:55:10,784 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-09T05:55:10,784 INFO [master/059551c538b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:10,784 INFO [master/059551c538b7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-09T05:55:10,784 INFO [master/059551c538b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-09T05:55:10,785 INFO [master/059551c538b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-09T05:55:10,785 INFO [master/059551c538b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-09T05:55:10,785 INFO [master/059551c538b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-09T05:55:10,785 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/059551c538b7:0:becomeActiveMaster-HFileCleaner.large.0-1731131710785,5,FailOnTimeoutGroup] 2024-11-09T05:55:10,785 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:10,785 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-09T05:55:10,789 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/059551c538b7:0:becomeActiveMaster-HFileCleaner.small.0-1731131710785,5,FailOnTimeoutGroup] 2024-11-09T05:55:10,789 INFO [master/059551c538b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:10,789 INFO [master/059551c538b7:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-09T05:55:10,789 INFO [master/059551c538b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:10,790 INFO [master/059551c538b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:10,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741831_1007 (size=1321) 2024-11-09T05:55:10,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741831_1007 (size=1321) 2024-11-09T05:55:10,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45033 is added to blk_1073741831_1007 (size=1321) 2024-11-09T05:55:10,797 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-09T05:55:10,798 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb 2024-11-09T05:55:10,807 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741832_1008 (size=32) 2024-11-09T05:55:10,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741832_1008 (size=32) 2024-11-09T05:55:10,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45033 is added to blk_1073741832_1008 (size=32) 2024-11-09T05:55:10,808 INFO [RS:2;059551c538b7:33005 {}] regionserver.HRegionServer(746): ClusterId : bf9c1675-8b1a-48f2-90b2-3d9bf10700c0 2024-11-09T05:55:10,809 INFO [RS:1;059551c538b7:38049 {}] regionserver.HRegionServer(746): ClusterId : bf9c1675-8b1a-48f2-90b2-3d9bf10700c0 2024-11-09T05:55:10,809 INFO [RS:0;059551c538b7:39265 {}] regionserver.HRegionServer(746): ClusterId : bf9c1675-8b1a-48f2-90b2-3d9bf10700c0 2024-11-09T05:55:10,809 DEBUG [RS:2;059551c538b7:33005 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T05:55:10,809 DEBUG [RS:1;059551c538b7:38049 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T05:55:10,809 DEBUG [RS:0;059551c538b7:39265 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T05:55:10,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T05:55:10,812 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-09T05:55:10,814 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-09T05:55:10,814 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:10,814 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T05:55:10,815 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-09T05:55:10,816 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-09T05:55:10,816 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:10,816 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T05:55:10,816 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-09T05:55:10,818 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-09T05:55:10,818 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:10,818 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T05:55:10,819 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-09T05:55:10,820 DEBUG [RS:0;059551c538b7:39265 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T05:55:10,820 DEBUG [RS:1;059551c538b7:38049 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T05:55:10,820 DEBUG [RS:0;059551c538b7:39265 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T05:55:10,820 DEBUG [RS:1;059551c538b7:38049 {}] 
procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T05:55:10,821 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-09T05:55:10,821 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:10,821 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T05:55:10,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-09T05:55:10,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/hbase/meta/1588230740 2024-11-09T05:55:10,823 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/hbase/meta/1588230740 2024-11-09T05:55:10,824 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-09T05:55:10,824 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-09T05:55:10,825 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
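Each of the repeated CompactionConfiguration lines above is a per-store dump of the same knobs (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0). A minimal sketch of how those values are normally supplied through hbase.hstore.compaction.* properties, shown here with the logged defaults for illustration only:

    // Hypothetical sketch: the standard compaction tuning keys behind the logged values.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);          // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);         // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        return conf;
      }
    }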
2024-11-09T05:55:10,826 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-09T05:55:10,829 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T05:55:10,830 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75159579, jitterRate=0.11996500194072723}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T05:55:10,830 DEBUG [RS:2;059551c538b7:33005 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T05:55:10,830 DEBUG [RS:2;059551c538b7:33005 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T05:55:10,831 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731131710811Initializing all the Stores at 1731131710812 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731131710812Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731131710812Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731131710812Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731131710812Cleaning up temporary data from old regions at 1731131710824 (+12 ms)Region opened successfully at 1731131710831 (+7 ms) 2024-11-09T05:55:10,831 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-09T05:55:10,831 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-09T05:55:10,831 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-09T05:55:10,831 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-09T05:55:10,831 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-09T05:55:10,831 INFO [PEWorker-1 {}] regionserver.HRegion(1973): 
Closed hbase:meta,,1.1588230740 2024-11-09T05:55:10,831 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731131710831Disabling compacts and flushes for region at 1731131710831Disabling writes for close at 1731131710831Writing region close event to WAL at 1731131710831Closed at 1731131710831 2024-11-09T05:55:10,833 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T05:55:10,833 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-09T05:55:10,833 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-09T05:55:10,836 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-09T05:55:10,837 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-09T05:55:10,851 DEBUG [RS:1;059551c538b7:38049 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T05:55:10,851 DEBUG [RS:0;059551c538b7:39265 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T05:55:10,851 DEBUG [RS:1;059551c538b7:38049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5868be9d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=059551c538b7/172.17.0.2:0 2024-11-09T05:55:10,851 DEBUG [RS:0;059551c538b7:39265 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7dab90db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=059551c538b7/172.17.0.2:0 2024-11-09T05:55:10,852 DEBUG [RS:2;059551c538b7:33005 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T05:55:10,852 DEBUG [RS:2;059551c538b7:33005 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ad753d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=059551c538b7/172.17.0.2:0 2024-11-09T05:55:10,863 DEBUG [RS:0;059551c538b7:39265 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;059551c538b7:39265 2024-11-09T05:55:10,863 INFO [RS:0;059551c538b7:39265 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T05:55:10,863 INFO [RS:0;059551c538b7:39265 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T05:55:10,863 DEBUG [RS:0;059551c538b7:39265 {}] regionserver.HRegionServer(832): About to register with 
Master. 2024-11-09T05:55:10,864 INFO [RS:0;059551c538b7:39265 {}] regionserver.HRegionServer(2659): reportForDuty to master=059551c538b7,39443,1731131710116 with port=39265, startcode=1731131710290 2024-11-09T05:55:10,864 DEBUG [RS:0;059551c538b7:39265 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-09T05:55:10,867 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53319, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-09T05:55:10,867 DEBUG [RS:1;059551c538b7:38049 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;059551c538b7:38049 2024-11-09T05:55:10,867 DEBUG [RS:2;059551c538b7:33005 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;059551c538b7:33005 2024-11-09T05:55:10,867 INFO [RS:2;059551c538b7:33005 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T05:55:10,867 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39443 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 059551c538b7,39265,1731131710290 2024-11-09T05:55:10,867 INFO [RS:1;059551c538b7:38049 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T05:55:10,868 INFO [RS:2;059551c538b7:33005 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T05:55:10,868 INFO [RS:1;059551c538b7:38049 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T05:55:10,868 DEBUG [RS:2;059551c538b7:33005 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-09T05:55:10,868 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39443 {}] master.ServerManager(517): Registering regionserver=059551c538b7,39265,1731131710290 2024-11-09T05:55:10,868 DEBUG [RS:1;059551c538b7:38049 {}] regionserver.HRegionServer(832): About to register with Master. 
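At this point the three region servers are calling reportForDuty and the master's ServerManager is checking and registering each of them. For context, the same registration can be observed from outside through the Admin API; a hypothetical sketch, assuming a recent HBase client where Admin#getRegionServers() is available (otherwise admin.getClusterMetrics().getLiveServerMetrics().keySet() yields the same set):

    // Hypothetical sketch: listing the region servers the master has registered.
    import java.util.Collection;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListRegionServersSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");             // quorum from the log
        conf.set("hbase.zookeeper.property.clientPort", "58997");    // client port from the log
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          Collection<ServerName> servers = admin.getRegionServers();
          servers.forEach(sn -> System.out.println("live regionserver: " + sn));
        }
      }
    }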
2024-11-09T05:55:10,868 INFO [RS:2;059551c538b7:33005 {}] regionserver.HRegionServer(2659): reportForDuty to master=059551c538b7,39443,1731131710116 with port=33005, startcode=1731131710380
2024-11-09T05:55:10,868 INFO [RS:1;059551c538b7:38049 {}] regionserver.HRegionServer(2659): reportForDuty to master=059551c538b7,39443,1731131710116 with port=38049, startcode=1731131710337
2024-11-09T05:55:10,869 DEBUG [RS:1;059551c538b7:38049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-09T05:55:10,869 DEBUG [RS:2;059551c538b7:33005 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-09T05:55:10,870 DEBUG [RS:0;059551c538b7:39265 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb
2024-11-09T05:55:10,870 DEBUG [RS:0;059551c538b7:39265 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46509
2024-11-09T05:55:10,870 DEBUG [RS:0;059551c538b7:39265 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-09T05:55:10,871 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50629, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService
2024-11-09T05:55:10,871 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35317, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService
2024-11-09T05:55:10,871 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39443 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 059551c538b7,33005,1731131710380
2024-11-09T05:55:10,871 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39443 {}] master.ServerManager(517): Registering regionserver=059551c538b7,33005,1731131710380
2024-11-09T05:55:10,873 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39443 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 059551c538b7,38049,1731131710337
2024-11-09T05:55:10,873 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39443 {}] master.ServerManager(517): Registering regionserver=059551c538b7,38049,1731131710337
2024-11-09T05:55:10,873 DEBUG [RS:2;059551c538b7:33005 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb
2024-11-09T05:55:10,873 DEBUG [RS:2;059551c538b7:33005 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46509
2024-11-09T05:55:10,873 DEBUG [RS:2;059551c538b7:33005 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-09T05:55:10,875 DEBUG [RS:1;059551c538b7:38049 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb
2024-11-09T05:55:10,875 DEBUG [RS:1;059551c538b7:38049 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46509
2024-11-09T05:55:10,875 DEBUG [RS:1;059551c538b7:38049 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-09T05:55:10,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-09T05:55:10,917 DEBUG [RS:0;059551c538b7:39265 {}] zookeeper.ZKUtil(111): regionserver:39265-0x1011e2996380001, quorum=127.0.0.1:58997, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/059551c538b7,39265,1731131710290
2024-11-09T05:55:10,917 WARN [RS:0;059551c538b7:39265 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-09T05:55:10,917 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [059551c538b7,38049,1731131710337]
2024-11-09T05:55:10,917 INFO [RS:0;059551c538b7:39265 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-09T05:55:10,917 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [059551c538b7,39265,1731131710290]
2024-11-09T05:55:10,917 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [059551c538b7,33005,1731131710380]
2024-11-09T05:55:10,917 DEBUG [RS:1;059551c538b7:38049 {}] zookeeper.ZKUtil(111): regionserver:38049-0x1011e2996380002, quorum=127.0.0.1:58997, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/059551c538b7,38049,1731131710337
2024-11-09T05:55:10,917 DEBUG [RS:0;059551c538b7:39265 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/WALs/059551c538b7,39265,1731131710290
2024-11-09T05:55:10,917 WARN [RS:1;059551c538b7:38049 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-09T05:55:10,917 DEBUG [RS:2;059551c538b7:33005 {}] zookeeper.ZKUtil(111): regionserver:33005-0x1011e2996380003, quorum=127.0.0.1:58997, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/059551c538b7,33005,1731131710380
2024-11-09T05:55:10,917 WARN [RS:2;059551c538b7:33005 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-09T05:55:10,917 INFO [RS:1;059551c538b7:38049 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-09T05:55:10,917 INFO [RS:2;059551c538b7:33005 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-09T05:55:10,917 DEBUG [RS:1;059551c538b7:38049 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/WALs/059551c538b7,38049,1731131710337 2024-11-09T05:55:10,918 DEBUG [RS:2;059551c538b7:33005 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/WALs/059551c538b7,33005,1731131710380 2024-11-09T05:55:10,926 INFO [RS:2;059551c538b7:33005 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T05:55:10,926 INFO [RS:1;059551c538b7:38049 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T05:55:10,926 INFO [RS:0;059551c538b7:39265 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T05:55:10,929 INFO [RS:2;059551c538b7:33005 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T05:55:10,932 INFO [RS:0;059551c538b7:39265 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T05:55:10,932 INFO [RS:2;059551c538b7:33005 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T05:55:10,932 INFO [RS:2;059551c538b7:33005 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:10,932 INFO [RS:2;059551c538b7:33005 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T05:55:10,932 INFO [RS:0;059551c538b7:39265 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T05:55:10,932 INFO [RS:0;059551c538b7:39265 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:10,933 INFO [RS:0;059551c538b7:39265 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T05:55:10,933 INFO [RS:2;059551c538b7:33005 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T05:55:10,933 INFO [RS:2;059551c538b7:33005 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-09T05:55:10,934 DEBUG [RS:2;059551c538b7:33005 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,934 DEBUG [RS:2;059551c538b7:33005 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,934 DEBUG [RS:2;059551c538b7:33005 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,934 DEBUG [RS:2;059551c538b7:33005 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,934 DEBUG [RS:2;059551c538b7:33005 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,934 DEBUG [RS:2;059551c538b7:33005 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/059551c538b7:0, corePoolSize=2, maxPoolSize=2 2024-11-09T05:55:10,934 DEBUG [RS:2;059551c538b7:33005 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,934 DEBUG [RS:2;059551c538b7:33005 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,934 DEBUG [RS:2;059551c538b7:33005 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,935 DEBUG [RS:2;059551c538b7:33005 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,935 DEBUG [RS:2;059551c538b7:33005 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,935 INFO [RS:0;059551c538b7:39265 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T05:55:10,935 DEBUG [RS:2;059551c538b7:33005 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,935 INFO [RS:1;059551c538b7:38049 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T05:55:10,935 DEBUG [RS:2;059551c538b7:33005 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/059551c538b7:0, corePoolSize=3, maxPoolSize=3 2024-11-09T05:55:10,935 INFO [RS:0;059551c538b7:39265 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-09T05:55:10,935 DEBUG [RS:2;059551c538b7:33005 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0, corePoolSize=3, maxPoolSize=3 2024-11-09T05:55:10,935 DEBUG [RS:0;059551c538b7:39265 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,935 DEBUG [RS:0;059551c538b7:39265 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,935 DEBUG [RS:0;059551c538b7:39265 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,935 DEBUG [RS:0;059551c538b7:39265 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,936 DEBUG [RS:0;059551c538b7:39265 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,936 DEBUG [RS:0;059551c538b7:39265 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/059551c538b7:0, corePoolSize=2, maxPoolSize=2 2024-11-09T05:55:10,936 DEBUG [RS:0;059551c538b7:39265 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,936 DEBUG [RS:0;059551c538b7:39265 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,936 DEBUG [RS:0;059551c538b7:39265 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,936 DEBUG [RS:0;059551c538b7:39265 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,936 INFO [RS:1;059551c538b7:38049 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T05:55:10,936 INFO [RS:1;059551c538b7:38049 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-09T05:55:10,936 DEBUG [RS:0;059551c538b7:39265 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,936 DEBUG [RS:0;059551c538b7:39265 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,936 DEBUG [RS:0;059551c538b7:39265 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/059551c538b7:0, corePoolSize=3, maxPoolSize=3 2024-11-09T05:55:10,937 DEBUG [RS:0;059551c538b7:39265 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0, corePoolSize=3, maxPoolSize=3 2024-11-09T05:55:10,939 INFO [RS:1;059551c538b7:38049 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T05:55:10,940 INFO [RS:2;059551c538b7:33005 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:10,940 INFO [RS:2;059551c538b7:33005 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:10,940 INFO [RS:2;059551c538b7:33005 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:10,940 INFO [RS:0;059551c538b7:39265 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:10,940 INFO [RS:2;059551c538b7:33005 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:10,940 INFO [RS:0;059551c538b7:39265 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:10,940 INFO [RS:2;059551c538b7:33005 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:10,940 INFO [RS:0;059551c538b7:39265 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:10,940 INFO [RS:2;059551c538b7:33005 {}] hbase.ChoreService(168): Chore ScheduledChore name=059551c538b7,33005,1731131710380-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T05:55:10,940 INFO [RS:0;059551c538b7:39265 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:10,940 INFO [RS:0;059551c538b7:39265 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:10,940 INFO [RS:1;059551c538b7:38049 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T05:55:10,940 INFO [RS:0;059551c538b7:39265 {}] hbase.ChoreService(168): Chore ScheduledChore name=059551c538b7,39265,1731131710290-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T05:55:10,940 INFO [RS:1;059551c538b7:38049 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-09T05:55:10,941 DEBUG [RS:1;059551c538b7:38049 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,941 DEBUG [RS:1;059551c538b7:38049 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,941 DEBUG [RS:1;059551c538b7:38049 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,941 DEBUG [RS:1;059551c538b7:38049 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,941 DEBUG [RS:1;059551c538b7:38049 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,941 DEBUG [RS:1;059551c538b7:38049 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/059551c538b7:0, corePoolSize=2, maxPoolSize=2 2024-11-09T05:55:10,941 DEBUG [RS:1;059551c538b7:38049 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,941 DEBUG [RS:1;059551c538b7:38049 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,941 DEBUG [RS:1;059551c538b7:38049 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,941 DEBUG [RS:1;059551c538b7:38049 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,941 DEBUG [RS:1;059551c538b7:38049 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,942 DEBUG [RS:1;059551c538b7:38049 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/059551c538b7:0, corePoolSize=1, maxPoolSize=1 2024-11-09T05:55:10,942 DEBUG [RS:1;059551c538b7:38049 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/059551c538b7:0, corePoolSize=3, maxPoolSize=3 2024-11-09T05:55:10,942 DEBUG [RS:1;059551c538b7:38049 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0, corePoolSize=3, maxPoolSize=3 2024-11-09T05:55:10,945 INFO [RS:1;059551c538b7:38049 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:10,945 INFO [RS:1;059551c538b7:38049 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:10,945 INFO [RS:1;059551c538b7:38049 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:10,945 INFO [RS:1;059551c538b7:38049 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-09T05:55:10,945 INFO [RS:1;059551c538b7:38049 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:10,945 INFO [RS:1;059551c538b7:38049 {}] hbase.ChoreService(168): Chore ScheduledChore name=059551c538b7,38049,1731131710337-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T05:55:10,957 INFO [RS:2;059551c538b7:33005 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T05:55:10,957 INFO [RS:2;059551c538b7:33005 {}] hbase.ChoreService(168): Chore ScheduledChore name=059551c538b7,33005,1731131710380-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:10,957 INFO [RS:2;059551c538b7:33005 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:10,957 INFO [RS:2;059551c538b7:33005 {}] regionserver.Replication(171): 059551c538b7,33005,1731131710380 started 2024-11-09T05:55:10,960 INFO [RS:0;059551c538b7:39265 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T05:55:10,960 INFO [RS:0;059551c538b7:39265 {}] hbase.ChoreService(168): Chore ScheduledChore name=059551c538b7,39265,1731131710290-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:10,960 INFO [RS:1;059551c538b7:38049 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T05:55:10,960 INFO [RS:0;059551c538b7:39265 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:10,960 INFO [RS:0;059551c538b7:39265 {}] regionserver.Replication(171): 059551c538b7,39265,1731131710290 started 2024-11-09T05:55:10,960 INFO [RS:1;059551c538b7:38049 {}] hbase.ChoreService(168): Chore ScheduledChore name=059551c538b7,38049,1731131710337-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:10,960 INFO [RS:1;059551c538b7:38049 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:10,960 INFO [RS:1;059551c538b7:38049 {}] regionserver.Replication(171): 059551c538b7,38049,1731131710337 started 2024-11-09T05:55:10,971 INFO [RS:2;059551c538b7:33005 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-09T05:55:10,971 INFO [RS:2;059551c538b7:33005 {}] regionserver.HRegionServer(1482): Serving as 059551c538b7,33005,1731131710380, RpcServer on 059551c538b7/172.17.0.2:33005, sessionid=0x1011e2996380003 2024-11-09T05:55:10,971 DEBUG [RS:2;059551c538b7:33005 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T05:55:10,971 DEBUG [RS:2;059551c538b7:33005 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 059551c538b7,33005,1731131710380 2024-11-09T05:55:10,971 DEBUG [RS:2;059551c538b7:33005 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '059551c538b7,33005,1731131710380' 2024-11-09T05:55:10,972 DEBUG [RS:2;059551c538b7:33005 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T05:55:10,972 DEBUG [RS:2;059551c538b7:33005 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T05:55:10,973 DEBUG [RS:2;059551c538b7:33005 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-09T05:55:10,973 DEBUG [RS:2;059551c538b7:33005 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T05:55:10,973 DEBUG [RS:2;059551c538b7:33005 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 059551c538b7,33005,1731131710380 2024-11-09T05:55:10,973 DEBUG [RS:2;059551c538b7:33005 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '059551c538b7,33005,1731131710380' 2024-11-09T05:55:10,973 DEBUG [RS:2;059551c538b7:33005 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T05:55:10,974 DEBUG [RS:2;059551c538b7:33005 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T05:55:10,974 INFO [RS:1;059551c538b7:38049 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:10,974 INFO [RS:0;059551c538b7:39265 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-09T05:55:10,974 INFO [RS:1;059551c538b7:38049 {}] regionserver.HRegionServer(1482): Serving as 059551c538b7,38049,1731131710337, RpcServer on 059551c538b7/172.17.0.2:38049, sessionid=0x1011e2996380002 2024-11-09T05:55:10,974 INFO [RS:0;059551c538b7:39265 {}] regionserver.HRegionServer(1482): Serving as 059551c538b7,39265,1731131710290, RpcServer on 059551c538b7/172.17.0.2:39265, sessionid=0x1011e2996380001 2024-11-09T05:55:10,974 DEBUG [RS:2;059551c538b7:33005 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T05:55:10,974 DEBUG [RS:1;059551c538b7:38049 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T05:55:10,974 INFO [RS:2;059551c538b7:33005 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T05:55:10,974 DEBUG [RS:0;059551c538b7:39265 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T05:55:10,974 DEBUG [RS:1;059551c538b7:38049 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 059551c538b7,38049,1731131710337 2024-11-09T05:55:10,974 INFO [RS:2;059551c538b7:33005 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-09T05:55:10,974 DEBUG [RS:0;059551c538b7:39265 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 059551c538b7,39265,1731131710290 2024-11-09T05:55:10,974 DEBUG [RS:1;059551c538b7:38049 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '059551c538b7,38049,1731131710337' 2024-11-09T05:55:10,974 DEBUG [RS:0;059551c538b7:39265 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '059551c538b7,39265,1731131710290' 2024-11-09T05:55:10,974 DEBUG [RS:1;059551c538b7:38049 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T05:55:10,974 DEBUG [RS:0;059551c538b7:39265 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T05:55:10,975 DEBUG [RS:0;059551c538b7:39265 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T05:55:10,975 DEBUG [RS:1;059551c538b7:38049 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T05:55:10,975 DEBUG [RS:0;059551c538b7:39265 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-09T05:55:10,975 DEBUG [RS:1;059551c538b7:38049 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-09T05:55:10,975 DEBUG [RS:0;059551c538b7:39265 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T05:55:10,975 DEBUG [RS:1;059551c538b7:38049 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T05:55:10,975 DEBUG [RS:0;059551c538b7:39265 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 059551c538b7,39265,1731131710290 2024-11-09T05:55:10,975 DEBUG [RS:1;059551c538b7:38049 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 059551c538b7,38049,1731131710337 2024-11-09T05:55:10,975 DEBUG [RS:0;059551c538b7:39265 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '059551c538b7,39265,1731131710290' 
2024-11-09T05:55:10,975 DEBUG [RS:1;059551c538b7:38049 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '059551c538b7,38049,1731131710337' 2024-11-09T05:55:10,975 DEBUG [RS:0;059551c538b7:39265 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T05:55:10,975 DEBUG [RS:1;059551c538b7:38049 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T05:55:10,976 DEBUG [RS:0;059551c538b7:39265 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T05:55:10,976 DEBUG [RS:1;059551c538b7:38049 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T05:55:10,976 DEBUG [RS:0;059551c538b7:39265 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T05:55:10,976 INFO [RS:0;059551c538b7:39265 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T05:55:10,976 INFO [RS:0;059551c538b7:39265 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-09T05:55:10,976 DEBUG [RS:1;059551c538b7:38049 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T05:55:10,976 INFO [RS:1;059551c538b7:38049 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T05:55:10,976 INFO [RS:1;059551c538b7:38049 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-09T05:55:10,987 WARN [059551c538b7:39443 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-09T05:55:11,078 INFO [RS:2;059551c538b7:33005 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=059551c538b7%2C33005%2C1731131710380, suffix=, logDir=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/WALs/059551c538b7,33005,1731131710380, archiveDir=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/oldWALs, maxLogs=32 2024-11-09T05:55:11,080 INFO [RS:0;059551c538b7:39265 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=059551c538b7%2C39265%2C1731131710290, suffix=, logDir=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/WALs/059551c538b7,39265,1731131710290, archiveDir=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/oldWALs, maxLogs=32 2024-11-09T05:55:11,080 INFO [RS:1;059551c538b7:38049 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=059551c538b7%2C38049%2C1731131710337, suffix=, logDir=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/WALs/059551c538b7,38049,1731131710337, archiveDir=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/oldWALs, maxLogs=32 2024-11-09T05:55:11,084 INFO [RS:2;059551c538b7:33005 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 059551c538b7%2C33005%2C1731131710380.1731131711084 2024-11-09T05:55:11,084 INFO [RS:0;059551c538b7:39265 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 059551c538b7%2C39265%2C1731131710290.1731131711084 2024-11-09T05:55:11,085 INFO [RS:1;059551c538b7:38049 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 059551c538b7%2C38049%2C1731131710337.1731131711085 2024-11-09T05:55:11,094 INFO [RS:0;059551c538b7:39265 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/WALs/059551c538b7,39265,1731131710290/059551c538b7%2C39265%2C1731131710290.1731131711084 2024-11-09T05:55:11,095 INFO [RS:2;059551c538b7:33005 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/WALs/059551c538b7,33005,1731131710380/059551c538b7%2C33005%2C1731131710380.1731131711084 2024-11-09T05:55:11,096 INFO [RS:1;059551c538b7:38049 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/WALs/059551c538b7,38049,1731131710337/059551c538b7%2C38049%2C1731131710337.1731131711085 2024-11-09T05:55:11,096 DEBUG [RS:0;059551c538b7:39265 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41501:41501),(127.0.0.1/127.0.0.1:36907:36907),(127.0.0.1/127.0.0.1:43797:43797)] 2024-11-09T05:55:11,096 DEBUG [RS:2;059551c538b7:33005 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41501:41501),(127.0.0.1/127.0.0.1:36907:36907),(127.0.0.1/127.0.0.1:43797:43797)] 2024-11-09T05:55:11,098 DEBUG [RS:1;059551c538b7:38049 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36907:36907),(127.0.0.1/127.0.0.1:41501:41501),(127.0.0.1/127.0.0.1:43797:43797)] 2024-11-09T05:55:11,238 DEBUG [059551c538b7:39443 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-09T05:55:11,239 DEBUG [059551c538b7:39443 {}] balancer.BalancerClusterState(204): Hosts are {059551c538b7=0} racks are {/default-rack=0} 2024-11-09T05:55:11,244 DEBUG [059551c538b7:39443 {}] 
balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-09T05:55:11,244 DEBUG [059551c538b7:39443 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-09T05:55:11,244 DEBUG [059551c538b7:39443 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-09T05:55:11,244 DEBUG [059551c538b7:39443 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-09T05:55:11,244 DEBUG [059551c538b7:39443 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-09T05:55:11,244 DEBUG [059551c538b7:39443 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-09T05:55:11,245 INFO [059551c538b7:39443 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-09T05:55:11,245 INFO [059551c538b7:39443 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-09T05:55:11,245 INFO [059551c538b7:39443 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-09T05:55:11,245 DEBUG [059551c538b7:39443 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-09T05:55:11,246 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=059551c538b7,38049,1731131710337 2024-11-09T05:55:11,249 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 059551c538b7,38049,1731131710337, state=OPENING 2024-11-09T05:55:11,268 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-09T05:55:11,352 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39265-0x1011e2996380001, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:11,352 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:11,352 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x1011e2996380003, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:11,352 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x1011e2996380002, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:11,353 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-09T05:55:11,353 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T05:55:11,353 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T05:55:11,353 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=059551c538b7,38049,1731131710337}] 2024-11-09T05:55:11,354 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T05:55:11,354 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T05:55:11,510 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-09T05:55:11,513 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51949, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-09T05:55:11,520 INFO [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-09T05:55:11,521 INFO [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-09T05:55:11,524 INFO [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=059551c538b7%2C38049%2C1731131710337.meta, suffix=.meta, logDir=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/WALs/059551c538b7,38049,1731131710337, archiveDir=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/oldWALs, maxLogs=32 2024-11-09T05:55:11,526 INFO [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 059551c538b7%2C38049%2C1731131710337.meta.1731131711525.meta 2024-11-09T05:55:11,533 INFO [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/WALs/059551c538b7,38049,1731131710337/059551c538b7%2C38049%2C1731131710337.meta.1731131711525.meta 2024-11-09T05:55:11,534 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36907:36907),(127.0.0.1/127.0.0.1:41501:41501),(127.0.0.1/127.0.0.1:43797:43797)] 2024-11-09T05:55:11,534 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-09T05:55:11,535 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-09T05:55:11,535 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-09T05:55:11,535 INFO [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-09T05:55:11,535 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-09T05:55:11,535 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T05:55:11,535 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-09T05:55:11,535 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-09T05:55:11,538 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-09T05:55:11,539 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-09T05:55:11,539 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:11,540 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T05:55:11,540 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-09T05:55:11,541 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-09T05:55:11,541 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:11,542 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T05:55:11,542 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-09T05:55:11,543 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-09T05:55:11,543 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:11,544 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T05:55:11,544 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-09T05:55:11,545 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-09T05:55:11,545 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:11,546 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-09T05:55:11,546 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-09T05:55:11,547 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/hbase/meta/1588230740 2024-11-09T05:55:11,548 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/hbase/meta/1588230740 2024-11-09T05:55:11,550 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-09T05:55:11,550 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-09T05:55:11,550 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-09T05:55:11,552 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-09T05:55:11,553 INFO [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63424855, jitterRate=-0.05489601194858551}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T05:55:11,553 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-09T05:55:11,554 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731131711536Writing region info on filesystem at 1731131711536Initializing all the Stores at 1731131711537 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731131711537Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731131711538 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731131711538Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731131711538Cleaning up temporary data from old regions at 1731131711550 (+12 ms)Running coprocessor post-open hooks at 1731131711553 (+3 ms)Region opened successfully at 1731131711554 (+1 ms) 2024-11-09T05:55:11,556 INFO [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731131711510 2024-11-09T05:55:11,559 DEBUG [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-09T05:55:11,559 INFO [RS_OPEN_META-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-09T05:55:11,560 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=059551c538b7,38049,1731131710337 2024-11-09T05:55:11,562 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 059551c538b7,38049,1731131710337, state=OPEN 2024-11-09T05:55:11,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x1011e2996380002, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T05:55:11,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x1011e2996380003, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T05:55:11,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39265-0x1011e2996380001, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T05:55:11,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T05:55:11,594 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=059551c538b7,38049,1731131710337 2024-11-09T05:55:11,594 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T05:55:11,594 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T05:55:11,594 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T05:55:11,594 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T05:55:11,603 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-09T05:55:11,603 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=059551c538b7,38049,1731131710337 in 241 msec 2024-11-09T05:55:11,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-09T05:55:11,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 770 msec 2024-11-09T05:55:11,609 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T05:55:11,609 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-09T05:55:11,611 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-09T05:55:11,611 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=059551c538b7,38049,1731131710337, seqNum=-1] 2024-11-09T05:55:11,611 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T05:55:11,613 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40421, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-09T05:55:11,622 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 842 msec 2024-11-09T05:55:11,622 INFO [master/059551c538b7:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731131711622, completionTime=-1 2024-11-09T05:55:11,622 INFO [master/059551c538b7:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-09T05:55:11,622 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 
2024-11-09T05:55:11,624 INFO [master/059551c538b7:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-09T05:55:11,624 INFO [master/059551c538b7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731131771624 2024-11-09T05:55:11,624 INFO [master/059551c538b7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731131831624 2024-11-09T05:55:11,624 INFO [master/059551c538b7:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-09T05:55:11,625 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-09T05:55:11,625 INFO [master/059551c538b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=059551c538b7,39443,1731131710116-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:11,626 INFO [master/059551c538b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=059551c538b7,39443,1731131710116-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:11,626 INFO [master/059551c538b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=059551c538b7,39443,1731131710116-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:11,626 INFO [master/059551c538b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-059551c538b7:39443, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:11,626 INFO [master/059551c538b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:11,626 INFO [master/059551c538b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:11,629 DEBUG [master/059551c538b7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-09T05:55:11,632 INFO [master/059551c538b7:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.180sec 2024-11-09T05:55:11,633 INFO [master/059551c538b7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-09T05:55:11,633 INFO [master/059551c538b7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-09T05:55:11,633 INFO [master/059551c538b7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-09T05:55:11,633 INFO [master/059551c538b7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-09T05:55:11,633 INFO [master/059551c538b7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-09T05:55:11,633 INFO [master/059551c538b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=059551c538b7,39443,1731131710116-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T05:55:11,633 INFO [master/059551c538b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=059551c538b7,39443,1731131710116-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-09T05:55:11,636 DEBUG [master/059551c538b7:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-09T05:55:11,636 INFO [master/059551c538b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-09T05:55:11,636 INFO [master/059551c538b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=059551c538b7,39443,1731131710116-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T05:55:11,710 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1301f665, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T05:55:11,710 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 059551c538b7,39443,-1 for getting cluster id 2024-11-09T05:55:11,711 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-09T05:55:11,713 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'bf9c1675-8b1a-48f2-90b2-3d9bf10700c0' 2024-11-09T05:55:11,715 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-09T05:55:11,715 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "bf9c1675-8b1a-48f2-90b2-3d9bf10700c0" 2024-11-09T05:55:11,715 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ea2eb3d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T05:55:11,716 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [059551c538b7,39443,-1] 2024-11-09T05:55:11,716 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-09T05:55:11,716 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T05:55:11,718 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49720, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-09T05:55:11,719 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4fadfb4b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T05:55:11,719 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-09T05:55:11,721 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=059551c538b7,38049,1731131710337, seqNum=-1] 2024-11-09T05:55:11,721 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T05:55:11,724 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55948, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-09T05:55:11,726 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=059551c538b7,39443,1731131710116 2024-11-09T05:55:11,727 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-09T05:55:11,728 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is 059551c538b7,39443,1731131710116 2024-11-09T05:55:11,728 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2ff85198 2024-11-09T05:55:11,729 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-09T05:55:11,730 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49736, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-09T05:55:11,731 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39443 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-09T05:55:11,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39443 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-09T05:55:11,735 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-09T05:55:11,736 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:11,736 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39443 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-09T05:55:11,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39443 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T05:55:11,738 INFO [PEWorker-3 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-09T05:55:11,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45033 is added to blk_1073741837_1013 (size=392) 2024-11-09T05:55:11,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741837_1013 (size=392) 2024-11-09T05:55:11,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741837_1013 (size=392) 2024-11-09T05:55:11,750 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b42707dd018e4415394cf06db355efeb, NAME => 'TestHBaseWalOnEC,,1731131711731.b42707dd018e4415394cf06db355efeb.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb 2024-11-09T05:55:11,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741838_1014 (size=51) 2024-11-09T05:55:11,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741838_1014 (size=51) 2024-11-09T05:55:11,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45033 is added to blk_1073741838_1014 (size=51) 2024-11-09T05:55:11,762 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731131711731.b42707dd018e4415394cf06db355efeb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T05:55:11,762 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing b42707dd018e4415394cf06db355efeb, disabling compactions & flushes 2024-11-09T05:55:11,762 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731131711731.b42707dd018e4415394cf06db355efeb. 2024-11-09T05:55:11,762 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731131711731.b42707dd018e4415394cf06db355efeb. 2024-11-09T05:55:11,762 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731131711731.b42707dd018e4415394cf06db355efeb. after waiting 0 ms 2024-11-09T05:55:11,762 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731131711731.b42707dd018e4415394cf06db355efeb. 2024-11-09T05:55:11,762 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731131711731.b42707dd018e4415394cf06db355efeb. 
2024-11-09T05:55:11,762 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for b42707dd018e4415394cf06db355efeb: Waiting for close lock at 1731131711762Disabling compacts and flushes for region at 1731131711762Disabling writes for close at 1731131711762Writing region close event to WAL at 1731131711762Closed at 1731131711762 2024-11-09T05:55:11,765 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-09T05:55:11,765 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731131711731.b42707dd018e4415394cf06db355efeb.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731131711765"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731131711765"}]},"ts":"1731131711765"} 2024-11-09T05:55:11,769 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-09T05:55:11,770 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-09T05:55:11,771 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731131711771"}]},"ts":"1731131711771"} 2024-11-09T05:55:11,774 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-09T05:55:11,774 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {059551c538b7=0} racks are {/default-rack=0} 2024-11-09T05:55:11,775 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-09T05:55:11,775 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-09T05:55:11,775 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-09T05:55:11,775 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-09T05:55:11,775 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-09T05:55:11,775 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-09T05:55:11,775 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-09T05:55:11,775 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-09T05:55:11,775 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-09T05:55:11,775 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-09T05:55:11,775 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=b42707dd018e4415394cf06db355efeb, ASSIGN}] 2024-11-09T05:55:11,778 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=b42707dd018e4415394cf06db355efeb, ASSIGN 2024-11-09T05:55:11,779 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=b42707dd018e4415394cf06db355efeb, ASSIGN; state=OFFLINE, location=059551c538b7,38049,1731131710337; forceNewPlan=false, retain=false 2024-11-09T05:55:11,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39443 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T05:55:11,930 INFO [059551c538b7:39443 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-09T05:55:11,931 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b42707dd018e4415394cf06db355efeb, regionState=OPENING, regionLocation=059551c538b7,38049,1731131710337 2024-11-09T05:55:11,937 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=b42707dd018e4415394cf06db355efeb, ASSIGN because future has completed 2024-11-09T05:55:11,938 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b42707dd018e4415394cf06db355efeb, server=059551c538b7,38049,1731131710337}] 2024-11-09T05:55:12,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39443 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T05:55:12,104 INFO [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731131711731.b42707dd018e4415394cf06db355efeb. 
2024-11-09T05:55:12,105 DEBUG [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => b42707dd018e4415394cf06db355efeb, NAME => 'TestHBaseWalOnEC,,1731131711731.b42707dd018e4415394cf06db355efeb.', STARTKEY => '', ENDKEY => ''} 2024-11-09T05:55:12,105 DEBUG [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC b42707dd018e4415394cf06db355efeb 2024-11-09T05:55:12,105 DEBUG [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731131711731.b42707dd018e4415394cf06db355efeb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T05:55:12,105 DEBUG [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for b42707dd018e4415394cf06db355efeb 2024-11-09T05:55:12,105 DEBUG [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for b42707dd018e4415394cf06db355efeb 2024-11-09T05:55:12,107 INFO [StoreOpener-b42707dd018e4415394cf06db355efeb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b42707dd018e4415394cf06db355efeb 2024-11-09T05:55:12,109 INFO [StoreOpener-b42707dd018e4415394cf06db355efeb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b42707dd018e4415394cf06db355efeb columnFamilyName cf 2024-11-09T05:55:12,109 DEBUG [StoreOpener-b42707dd018e4415394cf06db355efeb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T05:55:12,110 INFO [StoreOpener-b42707dd018e4415394cf06db355efeb-1 {}] regionserver.HStore(327): Store=b42707dd018e4415394cf06db355efeb/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T05:55:12,110 DEBUG [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for b42707dd018e4415394cf06db355efeb 2024-11-09T05:55:12,111 DEBUG [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/default/TestHBaseWalOnEC/b42707dd018e4415394cf06db355efeb 2024-11-09T05:55:12,111 DEBUG 
[RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/default/TestHBaseWalOnEC/b42707dd018e4415394cf06db355efeb 2024-11-09T05:55:12,112 DEBUG [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for b42707dd018e4415394cf06db355efeb 2024-11-09T05:55:12,112 DEBUG [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for b42707dd018e4415394cf06db355efeb 2024-11-09T05:55:12,114 DEBUG [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for b42707dd018e4415394cf06db355efeb 2024-11-09T05:55:12,116 DEBUG [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/default/TestHBaseWalOnEC/b42707dd018e4415394cf06db355efeb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T05:55:12,117 INFO [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened b42707dd018e4415394cf06db355efeb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70675587, jitterRate=0.053148314356803894}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-09T05:55:12,117 DEBUG [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b42707dd018e4415394cf06db355efeb 2024-11-09T05:55:12,118 DEBUG [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for b42707dd018e4415394cf06db355efeb: Running coprocessor pre-open hook at 1731131712105Writing region info on filesystem at 1731131712105Initializing all the Stores at 1731131712107 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731131712107Cleaning up temporary data from old regions at 1731131712112 (+5 ms)Running coprocessor post-open hooks at 1731131712117 (+5 ms)Region opened successfully at 1731131712118 (+1 ms) 2024-11-09T05:55:12,120 INFO [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731131711731.b42707dd018e4415394cf06db355efeb., pid=6, masterSystemTime=1731131712093 2024-11-09T05:55:12,123 DEBUG [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731131711731.b42707dd018e4415394cf06db355efeb. 2024-11-09T05:55:12,123 INFO [RS_OPEN_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731131711731.b42707dd018e4415394cf06db355efeb. 
2024-11-09T05:55:12,124 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b42707dd018e4415394cf06db355efeb, regionState=OPEN, openSeqNum=2, regionLocation=059551c538b7,38049,1731131710337 2024-11-09T05:55:12,128 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b42707dd018e4415394cf06db355efeb, server=059551c538b7,38049,1731131710337 because future has completed 2024-11-09T05:55:12,133 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-09T05:55:12,134 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure b42707dd018e4415394cf06db355efeb, server=059551c538b7,38049,1731131710337 in 192 msec 2024-11-09T05:55:12,138 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-09T05:55:12,139 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=b42707dd018e4415394cf06db355efeb, ASSIGN in 358 msec 2024-11-09T05:55:12,141 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-09T05:55:12,141 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731131712141"}]},"ts":"1731131712141"} 2024-11-09T05:55:12,144 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-09T05:55:12,146 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-09T05:55:12,150 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 415 msec 2024-11-09T05:55:12,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39443 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T05:55:12,368 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-09T05:55:12,368 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-09T05:55:12,368 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T05:55:12,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-09T05:55:12,373 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T05:55:12,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
2024-11-09T05:55:12,378 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731131711731.b42707dd018e4415394cf06db355efeb., hostname=059551c538b7,38049,1731131710337, seqNum=2] 2024-11-09T05:55:12,383 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39443 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-11-09T05:55:12,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39443 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-09T05:55:12,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39443 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T05:55:12,386 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-09T05:55:12,388 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-09T05:55:12,388 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-09T05:55:12,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39443 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T05:55:12,544 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38049 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-09T05:55:12,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731131711731.b42707dd018e4415394cf06db355efeb. 
2024-11-09T05:55:12,545 INFO [RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing b42707dd018e4415394cf06db355efeb 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-09T05:55:12,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/default/TestHBaseWalOnEC/b42707dd018e4415394cf06db355efeb/.tmp/cf/87e9f73c4b244dc6aab4e32ea86b7b6d is 36, key is row/cf:cq/1731131712379/Put/seqid=0 2024-11-09T05:55:12,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45033 is added to blk_1073741839_1015 (size=4787) 2024-11-09T05:55:12,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741839_1015 (size=4787) 2024-11-09T05:55:12,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741839_1015 (size=4787) 2024-11-09T05:55:12,577 INFO [RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/default/TestHBaseWalOnEC/b42707dd018e4415394cf06db355efeb/.tmp/cf/87e9f73c4b244dc6aab4e32ea86b7b6d 2024-11-09T05:55:12,583 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-09T05:55:12,583 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-09T05:55:12,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/default/TestHBaseWalOnEC/b42707dd018e4415394cf06db355efeb/.tmp/cf/87e9f73c4b244dc6aab4e32ea86b7b6d as hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/default/TestHBaseWalOnEC/b42707dd018e4415394cf06db355efeb/cf/87e9f73c4b244dc6aab4e32ea86b7b6d 2024-11-09T05:55:12,586 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-09T05:55:12,586 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-09T05:55:12,586 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-09T05:55:12,586 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-09T05:55:12,587 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the 
MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestHBaseWalOnEC 2024-11-09T05:55:12,587 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestHBaseWalOnEC Metrics about Tables on a single HBase RegionServer 2024-11-09T05:55:12,593 INFO [RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/default/TestHBaseWalOnEC/b42707dd018e4415394cf06db355efeb/cf/87e9f73c4b244dc6aab4e32ea86b7b6d, entries=1, sequenceid=5, filesize=4.7 K 2024-11-09T05:55:12,595 INFO [RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for b42707dd018e4415394cf06db355efeb in 50ms, sequenceid=5, compaction requested=false 2024-11-09T05:55:12,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for b42707dd018e4415394cf06db355efeb: 2024-11-09T05:55:12,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731131711731.b42707dd018e4415394cf06db355efeb. 2024-11-09T05:55:12,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/059551c538b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-09T05:55:12,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39443 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-09T05:55:12,601 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-09T05:55:12,601 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 210 msec 2024-11-09T05:55:12,604 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 219 msec 2024-11-09T05:55:12,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39443 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T05:55:12,708 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-09T05:55:12,716 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-09T05:55:12,716 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-09T05:55:12,717 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T05:55:12,717 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-11-09T05:55:12,717 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T05:55:12,717 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-09T05:55:12,717 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-09T05:55:12,717 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1249407103, stopped=false 2024-11-09T05:55:12,718 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=059551c538b7,39443,1731131710116 2024-11-09T05:55:12,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T05:55:12,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39265-0x1011e2996380001, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T05:55:12,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x1011e2996380003, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T05:55:12,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x1011e2996380002, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T05:55:12,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:12,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39265-0x1011e2996380001, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:12,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x1011e2996380003, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:12,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x1011e2996380002, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:12,788 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-09T05:55:12,788 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-09T05:55:12,789 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T05:55:12,789 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T05:55:12,789 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38049-0x1011e2996380002, quorum=127.0.0.1:58997, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T05:55:12,789 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '059551c538b7,39265,1731131710290' ***** 2024-11-09T05:55:12,789 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-09T05:55:12,789 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '059551c538b7,38049,1731131710337' ***** 2024-11-09T05:55:12,789 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33005-0x1011e2996380003, quorum=127.0.0.1:58997, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T05:55:12,789 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T05:55:12,789 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-09T05:55:12,789 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39265-0x1011e2996380001, quorum=127.0.0.1:58997, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T05:55:12,789 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '059551c538b7,33005,1731131710380' ***** 2024-11-09T05:55:12,789 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-09T05:55:12,789 INFO [RS:1;059551c538b7:38049 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T05:55:12,789 INFO [RS:0;059551c538b7:39265 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T05:55:12,789 INFO [RS:2;059551c538b7:33005 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T05:55:12,789 INFO [RS:1;059551c538b7:38049 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-09T05:55:12,789 INFO [RS:2;059551c538b7:33005 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-09T05:55:12,789 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T05:55:12,789 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T05:55:12,790 INFO [RS:1;059551c538b7:38049 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-09T05:55:12,790 INFO [RS:2;059551c538b7:33005 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-09T05:55:12,790 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T05:55:12,790 INFO [RS:2;059551c538b7:33005 {}] regionserver.HRegionServer(959): stopping server 059551c538b7,33005,1731131710380 2024-11-09T05:55:12,790 INFO [RS:1;059551c538b7:38049 {}] regionserver.HRegionServer(3091): Received CLOSE for b42707dd018e4415394cf06db355efeb 2024-11-09T05:55:12,790 INFO [RS:2;059551c538b7:33005 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T05:55:12,790 INFO [RS:2;059551c538b7:33005 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;059551c538b7:33005. 2024-11-09T05:55:12,790 DEBUG [RS:2;059551c538b7:33005 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T05:55:12,790 DEBUG [RS:2;059551c538b7:33005 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T05:55:12,790 INFO [RS:0;059551c538b7:39265 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-09T05:55:12,790 INFO [RS:0;059551c538b7:39265 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-09T05:55:12,790 INFO [RS:2;059551c538b7:33005 {}] regionserver.HRegionServer(976): stopping server 059551c538b7,33005,1731131710380; all regions closed. 2024-11-09T05:55:12,790 INFO [RS:0;059551c538b7:39265 {}] regionserver.HRegionServer(959): stopping server 059551c538b7,39265,1731131710290 2024-11-09T05:55:12,790 INFO [RS:1;059551c538b7:38049 {}] regionserver.HRegionServer(959): stopping server 059551c538b7,38049,1731131710337 2024-11-09T05:55:12,790 INFO [RS:0;059551c538b7:39265 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T05:55:12,790 INFO [RS:1;059551c538b7:38049 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T05:55:12,790 INFO [RS:0;059551c538b7:39265 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;059551c538b7:39265. 2024-11-09T05:55:12,790 INFO [RS:1;059551c538b7:38049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;059551c538b7:38049. 
2024-11-09T05:55:12,790 DEBUG [RS:0;059551c538b7:39265 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T05:55:12,790 DEBUG [RS:1;059551c538b7:38049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T05:55:12,791 DEBUG [RS:0;059551c538b7:39265 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T05:55:12,791 DEBUG [RS:1;059551c538b7:38049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T05:55:12,791 INFO [RS:0;059551c538b7:39265 {}] regionserver.HRegionServer(976): stopping server 059551c538b7,39265,1731131710290; all regions closed. 2024-11-09T05:55:12,791 DEBUG [RS_CLOSE_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing b42707dd018e4415394cf06db355efeb, disabling compactions & flushes 2024-11-09T05:55:12,791 INFO [RS:1;059551c538b7:38049 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-09T05:55:12,791 INFO [RS:1;059551c538b7:38049 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T05:55:12,791 INFO [RS_CLOSE_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731131711731.b42707dd018e4415394cf06db355efeb. 2024-11-09T05:55:12,791 INFO [RS:1;059551c538b7:38049 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-09T05:55:12,791 DEBUG [RS_CLOSE_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731131711731.b42707dd018e4415394cf06db355efeb. 2024-11-09T05:55:12,791 INFO [RS:1;059551c538b7:38049 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-09T05:55:12,791 DEBUG [RS_CLOSE_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731131711731.b42707dd018e4415394cf06db355efeb. after waiting 0 ms 2024-11-09T05:55:12,791 DEBUG [RS_CLOSE_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731131711731.b42707dd018e4415394cf06db355efeb. 2024-11-09T05:55:12,791 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T05:55:12,791 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T05:55:12,791 INFO [RS:1;059551c538b7:38049 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-09T05:55:12,791 DEBUG [RS:1;059551c538b7:38049 {}] regionserver.HRegionServer(1325): Online Regions={b42707dd018e4415394cf06db355efeb=TestHBaseWalOnEC,,1731131711731.b42707dd018e4415394cf06db355efeb., 1588230740=hbase:meta,,1.1588230740} 2024-11-09T05:55:12,791 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T05:55:12,791 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T05:55:12,791 DEBUG [RS:1;059551c538b7:38049 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, b42707dd018e4415394cf06db355efeb 2024-11-09T05:55:12,791 DEBUG [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-09T05:55:12,791 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T05:55:12,791 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T05:55:12,791 INFO [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-09T05:55:12,791 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T05:55:12,791 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T05:55:12,791 DEBUG [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-09T05:55:12,792 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T05:55:12,792 DEBUG [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-09T05:55:12,792 DEBUG [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-09T05:55:12,792 INFO [sync.4 
{}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T05:55:12,792 INFO [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-09T05:55:12,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45033 is added to blk_1073741834_1010 (size=93) 2024-11-09T05:55:12,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741834_1010 (size=93) 2024-11-09T05:55:12,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45033 is added to blk_1073741833_1009 (size=93) 2024-11-09T05:55:12,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741833_1009 (size=93) 2024-11-09T05:55:12,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741834_1010 (size=93) 2024-11-09T05:55:12,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741833_1009 (size=93) 2024-11-09T05:55:12,802 DEBUG [RS_CLOSE_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/default/TestHBaseWalOnEC/b42707dd018e4415394cf06db355efeb/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-09T05:55:12,803 DEBUG [RS:2;059551c538b7:33005 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/oldWALs 2024-11-09T05:55:12,803 INFO [RS_CLOSE_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731131711731.b42707dd018e4415394cf06db355efeb. 2024-11-09T05:55:12,803 INFO [RS:2;059551c538b7:33005 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 059551c538b7%2C33005%2C1731131710380:(num 1731131711084) 2024-11-09T05:55:12,803 DEBUG [RS_CLOSE_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for b42707dd018e4415394cf06db355efeb: Waiting for close lock at 1731131712791Running coprocessor pre-close hooks at 1731131712791Disabling compacts and flushes for region at 1731131712791Disabling writes for close at 1731131712791Writing region close event to WAL at 1731131712792 (+1 ms)Running coprocessor post-close hooks at 1731131712803 (+11 ms)Closed at 1731131712803 2024-11-09T05:55:12,803 DEBUG [RS:2;059551c538b7:33005 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T05:55:12,803 INFO [RS:2;059551c538b7:33005 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T05:55:12,803 DEBUG [RS_CLOSE_REGION-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731131711731.b42707dd018e4415394cf06db355efeb. 
2024-11-09T05:55:12,803 INFO [RS:2;059551c538b7:33005 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T05:55:12,804 INFO [RS:2;059551c538b7:33005 {}] hbase.ChoreService(370): Chore service for: regionserver/059551c538b7:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-09T05:55:12,804 INFO [RS:2;059551c538b7:33005 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-09T05:55:12,804 INFO [regionserver/059551c538b7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-09T05:55:12,804 INFO [RS:2;059551c538b7:33005 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T05:55:12,804 INFO [RS:2;059551c538b7:33005 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-09T05:55:12,804 DEBUG [RS:0;059551c538b7:39265 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/oldWALs 2024-11-09T05:55:12,804 INFO [RS:2;059551c538b7:33005 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T05:55:12,804 INFO [RS:0;059551c538b7:39265 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 059551c538b7%2C39265%2C1731131710290:(num 1731131711084) 2024-11-09T05:55:12,804 DEBUG [RS:0;059551c538b7:39265 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T05:55:12,804 INFO [RS:0;059551c538b7:39265 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T05:55:12,804 INFO [RS:2;059551c538b7:33005 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33005 2024-11-09T05:55:12,804 INFO [RS:0;059551c538b7:39265 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T05:55:12,804 INFO [RS:0;059551c538b7:39265 {}] hbase.ChoreService(370): Chore service for: regionserver/059551c538b7:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-09T05:55:12,804 INFO [RS:0;059551c538b7:39265 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-09T05:55:12,804 INFO [RS:0;059551c538b7:39265 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T05:55:12,804 INFO [regionserver/059551c538b7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-09T05:55:12,804 INFO [RS:0;059551c538b7:39265 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-09T05:55:12,805 INFO [RS:0;059551c538b7:39265 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T05:55:12,805 INFO [RS:0;059551c538b7:39265 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39265 2024-11-09T05:55:12,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T05:55:12,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x1011e2996380003, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/059551c538b7,33005,1731131710380 2024-11-09T05:55:12,815 INFO [RS:2;059551c538b7:33005 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T05:55:12,816 DEBUG [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/hbase/meta/1588230740/.tmp/info/5c49ac9c3bf6455ab8f32c872386247b is 153, key is TestHBaseWalOnEC,,1731131711731.b42707dd018e4415394cf06db355efeb./info:regioninfo/1731131712124/Put/seqid=0 2024-11-09T05:55:12,818 WARN [IPC Server handler 4 on default port 46509 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-09T05:55:12,818 WARN [IPC Server handler 4 on default port 46509 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-09T05:55:12,818 WARN [IPC Server handler 4 on default port 46509 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-09T05:55:12,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45033 is added to blk_1073741840_1016 (size=6637) 2024-11-09T05:55:12,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741840_1016 (size=6637) 2024-11-09T05:55:12,823 INFO [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/hbase/meta/1588230740/.tmp/info/5c49ac9c3bf6455ab8f32c872386247b 2024-11-09T05:55:12,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:39265-0x1011e2996380001, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/059551c538b7,39265,1731131710290 2024-11-09T05:55:12,825 INFO [RS:0;059551c538b7:39265 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T05:55:12,826 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [059551c538b7,39265,1731131710290] 2024-11-09T05:55:12,842 INFO [regionserver/059551c538b7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T05:55:12,842 INFO [regionserver/059551c538b7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T05:55:12,846 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/059551c538b7,39265,1731131710290 already deleted, retry=false 2024-11-09T05:55:12,846 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 059551c538b7,39265,1731131710290 expired; onlineServers=2 2024-11-09T05:55:12,846 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [059551c538b7,33005,1731131710380] 2024-11-09T05:55:12,848 INFO [regionserver/059551c538b7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T05:55:12,852 DEBUG [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/hbase/meta/1588230740/.tmp/ns/4f2a3aaa6dc14813982409c90dca6b6a is 43, key is default/ns:d/1731131711614/Put/seqid=0 2024-11-09T05:55:12,857 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/059551c538b7,33005,1731131710380 already deleted, retry=false 2024-11-09T05:55:12,857 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 059551c538b7,33005,1731131710380 expired; onlineServers=1 2024-11-09T05:55:12,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741841_1017 (size=5153) 2024-11-09T05:55:12,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45033 is added to blk_1073741841_1017 (size=5153) 2024-11-09T05:55:12,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741841_1017 (size=5153) 2024-11-09T05:55:12,861 INFO [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/hbase/meta/1588230740/.tmp/ns/4f2a3aaa6dc14813982409c90dca6b6a 2024-11-09T05:55:12,883 DEBUG [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/hbase/meta/1588230740/.tmp/table/a67c31883fa9461f8196123a3a95465d is 52, key is TestHBaseWalOnEC/table:state/1731131712141/Put/seqid=0 2024-11-09T05:55:12,884 WARN [IPC Server handler 1 on default port 46509 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 
(unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-09T05:55:12,884 WARN [IPC Server handler 1 on default port 46509 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-09T05:55:12,885 WARN [IPC Server handler 1 on default port 46509 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-09T05:55:12,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741842_1018 (size=5249) 2024-11-09T05:55:12,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45033 is added to blk_1073741842_1018 (size=5249) 2024-11-09T05:55:12,890 INFO [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/hbase/meta/1588230740/.tmp/table/a67c31883fa9461f8196123a3a95465d 2024-11-09T05:55:12,898 DEBUG [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/hbase/meta/1588230740/.tmp/info/5c49ac9c3bf6455ab8f32c872386247b as hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/hbase/meta/1588230740/info/5c49ac9c3bf6455ab8f32c872386247b 2024-11-09T05:55:12,907 INFO [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/hbase/meta/1588230740/info/5c49ac9c3bf6455ab8f32c872386247b, entries=10, sequenceid=11, filesize=6.5 K 2024-11-09T05:55:12,909 DEBUG [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/hbase/meta/1588230740/.tmp/ns/4f2a3aaa6dc14813982409c90dca6b6a as hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/hbase/meta/1588230740/ns/4f2a3aaa6dc14813982409c90dca6b6a 2024-11-09T05:55:12,917 INFO [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/hbase/meta/1588230740/ns/4f2a3aaa6dc14813982409c90dca6b6a, entries=2, sequenceid=11, filesize=5.0 K 
2024-11-09T05:55:12,918 DEBUG [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/hbase/meta/1588230740/.tmp/table/a67c31883fa9461f8196123a3a95465d as hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/hbase/meta/1588230740/table/a67c31883fa9461f8196123a3a95465d 2024-11-09T05:55:12,928 INFO [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/hbase/meta/1588230740/table/a67c31883fa9461f8196123a3a95465d, entries=2, sequenceid=11, filesize=5.1 K 2024-11-09T05:55:12,930 INFO [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 137ms, sequenceid=11, compaction requested=false 2024-11-09T05:55:12,936 DEBUG [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-09T05:55:12,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x1011e2996380003, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T05:55:12,936 INFO [RS:2;059551c538b7:33005 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T05:55:12,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x1011e2996380003, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T05:55:12,936 INFO [RS:2;059551c538b7:33005 {}] regionserver.HRegionServer(1031): Exiting; stopping=059551c538b7,33005,1731131710380; zookeeper connection closed. 
2024-11-09T05:55:12,936 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1a837399 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1a837399 2024-11-09T05:55:12,937 DEBUG [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-09T05:55:12,937 INFO [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-09T05:55:12,937 DEBUG [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731131712791Running coprocessor pre-close hooks at 1731131712791Disabling compacts and flushes for region at 1731131712791Disabling writes for close at 1731131712792 (+1 ms)Obtaining lock to block concurrent updates at 1731131712792Preparing flush snapshotting stores in 1588230740 at 1731131712792Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731131712792Flushing stores of hbase:meta,,1.1588230740 at 1731131712793 (+1 ms)Flushing 1588230740/info: creating writer at 1731131712793Flushing 1588230740/info: appending metadata at 1731131712816 (+23 ms)Flushing 1588230740/info: closing flushed file at 1731131712816Flushing 1588230740/ns: creating writer at 1731131712832 (+16 ms)Flushing 1588230740/ns: appending metadata at 1731131712852 (+20 ms)Flushing 1588230740/ns: closing flushed file at 1731131712852Flushing 1588230740/table: creating writer at 1731131712868 (+16 ms)Flushing 1588230740/table: appending metadata at 1731131712882 (+14 ms)Flushing 1588230740/table: closing flushed file at 1731131712883 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@58cc587c: reopening flushed file at 1731131712897 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3bfeba22: reopening flushed file at 1731131712907 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7fd7cc2e: reopening flushed file at 1731131712917 (+10 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 137ms, sequenceid=11, compaction requested=false at 1731131712930 (+13 ms)Writing region close event to WAL at 1731131712931 (+1 ms)Running coprocessor post-close hooks at 1731131712937 (+6 ms)Closed at 1731131712937 2024-11-09T05:55:12,937 DEBUG [RS_CLOSE_META-regionserver/059551c538b7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-09T05:55:12,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39265-0x1011e2996380001, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T05:55:12,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39265-0x1011e2996380001, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T05:55:12,947 INFO [RS:0;059551c538b7:39265 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T05:55:12,947 INFO [RS:0;059551c538b7:39265 {}] regionserver.HRegionServer(1031): Exiting; stopping=059551c538b7,39265,1731131710290; zookeeper 
connection closed. 2024-11-09T05:55:12,947 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@75852a9b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@75852a9b 2024-11-09T05:55:12,948 INFO [regionserver/059551c538b7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-09T05:55:12,948 INFO [regionserver/059551c538b7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-09T05:55:12,991 INFO [RS:1;059551c538b7:38049 {}] regionserver.HRegionServer(976): stopping server 059551c538b7,38049,1731131710337; all regions closed. 2024-11-09T05:55:12,992 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T05:55:12,992 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T05:55:12,992 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T05:55:12,993 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T05:55:12,993 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T05:55:12,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45033 is added to blk_1073741836_1012 (size=2751) 2024-11-09T05:55:12,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741836_1012 (size=2751) 2024-11-09T05:55:12,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741836_1012 (size=2751) 2024-11-09T05:55:12,998 DEBUG [RS:1;059551c538b7:38049 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/oldWALs 2024-11-09T05:55:12,998 INFO [RS:1;059551c538b7:38049 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 059551c538b7%2C38049%2C1731131710337.meta:.meta(num 1731131711525) 2024-11-09T05:55:12,999 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T05:55:12,999 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T05:55:12,999 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T05:55:12,999 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T05:55:12,999 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T05:55:13,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45033 is added to blk_1073741835_1011 (size=1298) 2024-11-09T05:55:13,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741835_1011 (size=1298) 2024-11-09T05:55:13,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741835_1011 (size=1298) 2024-11-09T05:55:13,005 DEBUG [RS:1;059551c538b7:38049 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/oldWALs 2024-11-09T05:55:13,005 INFO [RS:1;059551c538b7:38049 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 059551c538b7%2C38049%2C1731131710337:(num 1731131711085) 2024-11-09T05:55:13,005 DEBUG [RS:1;059551c538b7:38049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T05:55:13,005 INFO [RS:1;059551c538b7:38049 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T05:55:13,005 INFO [RS:1;059551c538b7:38049 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 
2024-11-09T05:55:13,005 INFO [RS:1;059551c538b7:38049 {}] hbase.ChoreService(370): Chore service for: regionserver/059551c538b7:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-09T05:55:13,006 INFO [RS:1;059551c538b7:38049 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T05:55:13,006 INFO [regionserver/059551c538b7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-09T05:55:13,006 INFO [RS:1;059551c538b7:38049 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38049 2024-11-09T05:55:13,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T05:55:13,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x1011e2996380002, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/059551c538b7,38049,1731131710337 2024-11-09T05:55:13,015 INFO [RS:1;059551c538b7:38049 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T05:55:13,025 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [059551c538b7,38049,1731131710337] 2024-11-09T05:55:13,036 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/059551c538b7,38049,1731131710337 already deleted, retry=false 2024-11-09T05:55:13,036 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 059551c538b7,38049,1731131710337 expired; onlineServers=0 2024-11-09T05:55:13,036 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '059551c538b7,39443,1731131710116' ***** 2024-11-09T05:55:13,036 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-09T05:55:13,036 INFO [M:0;059551c538b7:39443 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T05:55:13,036 INFO [M:0;059551c538b7:39443 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T05:55:13,036 DEBUG [M:0;059551c538b7:39443 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-09T05:55:13,036 DEBUG [M:0;059551c538b7:39443 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-09T05:55:13,036 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-09T05:55:13,036 DEBUG [master/059551c538b7:0:becomeActiveMaster-HFileCleaner.large.0-1731131710785 {}] cleaner.HFileCleaner(306): Exit Thread[master/059551c538b7:0:becomeActiveMaster-HFileCleaner.large.0-1731131710785,5,FailOnTimeoutGroup] 2024-11-09T05:55:13,036 DEBUG [master/059551c538b7:0:becomeActiveMaster-HFileCleaner.small.0-1731131710785 {}] cleaner.HFileCleaner(306): Exit Thread[master/059551c538b7:0:becomeActiveMaster-HFileCleaner.small.0-1731131710785,5,FailOnTimeoutGroup] 2024-11-09T05:55:13,037 INFO [M:0;059551c538b7:39443 {}] hbase.ChoreService(370): Chore service for: master/059551c538b7:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-09T05:55:13,037 INFO [M:0;059551c538b7:39443 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T05:55:13,037 DEBUG [M:0;059551c538b7:39443 {}] master.HMaster(1795): Stopping service threads 2024-11-09T05:55:13,037 INFO [M:0;059551c538b7:39443 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-09T05:55:13,037 INFO [M:0;059551c538b7:39443 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-09T05:55:13,037 INFO [M:0;059551c538b7:39443 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-09T05:55:13,037 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-09T05:55:13,046 DEBUG [M:0;059551c538b7:39443 {}] zookeeper.ZKUtil(347): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-09T05:55:13,046 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-09T05:55:13,046 WARN [M:0;059551c538b7:39443 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-09T05:55:13,046 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T05:55:13,047 INFO [M:0;059551c538b7:39443 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/.lastflushedseqids 2024-11-09T05:55:13,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741843_1019 (size=127) 2024-11-09T05:55:13,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45033 is added to blk_1073741843_1019 (size=127) 2024-11-09T05:55:13,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741843_1019 (size=127) 2024-11-09T05:55:13,055 INFO [M:0;059551c538b7:39443 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-09T05:55:13,055 INFO [M:0;059551c538b7:39443 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-09T05:55:13,055 DEBUG 
[M:0;059551c538b7:39443 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-09T05:55:13,055 INFO [M:0;059551c538b7:39443 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T05:55:13,055 DEBUG [M:0;059551c538b7:39443 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T05:55:13,055 DEBUG [M:0;059551c538b7:39443 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-09T05:55:13,055 DEBUG [M:0;059551c538b7:39443 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T05:55:13,055 INFO [M:0;059551c538b7:39443 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.85 KB heapSize=34.13 KB 2024-11-09T05:55:13,073 DEBUG [M:0;059551c538b7:39443 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/39d1ee5e89754d0ebe81ceb07ac7836b is 82, key is hbase:meta,,1/info:regioninfo/1731131711560/Put/seqid=0 2024-11-09T05:55:13,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45033 is added to blk_1073741844_1020 (size=5672) 2024-11-09T05:55:13,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741844_1020 (size=5672) 2024-11-09T05:55:13,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741844_1020 (size=5672) 2024-11-09T05:55:13,081 INFO [M:0;059551c538b7:39443 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/39d1ee5e89754d0ebe81ceb07ac7836b 2024-11-09T05:55:13,109 DEBUG [M:0;059551c538b7:39443 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2c88d870bfb64d7bab48658a7f7c9384 is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731131712148/Put/seqid=0 2024-11-09T05:55:13,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45033 is added to blk_1073741845_1021 (size=6441) 2024-11-09T05:55:13,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741845_1021 (size=6441) 2024-11-09T05:55:13,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741845_1021 (size=6441) 2024-11-09T05:55:13,117 INFO [M:0;059551c538b7:39443 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.17 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2c88d870bfb64d7bab48658a7f7c9384 2024-11-09T05:55:13,125 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x1011e2996380002, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T05:55:13,126 INFO [RS:1;059551c538b7:38049 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T05:55:13,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x1011e2996380002, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T05:55:13,126 INFO [RS:1;059551c538b7:38049 {}] regionserver.HRegionServer(1031): Exiting; stopping=059551c538b7,38049,1731131710337; zookeeper connection closed. 2024-11-09T05:55:13,126 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@36eddbe6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@36eddbe6 2024-11-09T05:55:13,126 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-09T05:55:13,141 DEBUG [M:0;059551c538b7:39443 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4fb1fb1032524e3a8cf5cebfe1c18b61 is 69, key is 059551c538b7,33005,1731131710380/rs:state/1731131710871/Put/seqid=0 2024-11-09T05:55:13,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741846_1022 (size=5294) 2024-11-09T05:55:13,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45033 is added to blk_1073741846_1022 (size=5294) 2024-11-09T05:55:13,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741846_1022 (size=5294) 2024-11-09T05:55:13,151 INFO [M:0;059551c538b7:39443 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4fb1fb1032524e3a8cf5cebfe1c18b61 2024-11-09T05:55:13,160 DEBUG [M:0;059551c538b7:39443 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/39d1ee5e89754d0ebe81ceb07ac7836b as hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/39d1ee5e89754d0ebe81ceb07ac7836b 2024-11-09T05:55:13,170 INFO [M:0;059551c538b7:39443 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/39d1ee5e89754d0ebe81ceb07ac7836b, entries=8, sequenceid=72, filesize=5.5 K 2024-11-09T05:55:13,171 DEBUG [M:0;059551c538b7:39443 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2c88d870bfb64d7bab48658a7f7c9384 as 
hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2c88d870bfb64d7bab48658a7f7c9384 2024-11-09T05:55:13,180 INFO [M:0;059551c538b7:39443 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2c88d870bfb64d7bab48658a7f7c9384, entries=8, sequenceid=72, filesize=6.3 K 2024-11-09T05:55:13,182 DEBUG [M:0;059551c538b7:39443 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4fb1fb1032524e3a8cf5cebfe1c18b61 as hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4fb1fb1032524e3a8cf5cebfe1c18b61 2024-11-09T05:55:13,191 INFO [M:0;059551c538b7:39443 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46509/user/jenkins/test-data/81dce8ea-02b5-1d66-1055-8aa22deb33fb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4fb1fb1032524e3a8cf5cebfe1c18b61, entries=3, sequenceid=72, filesize=5.2 K 2024-11-09T05:55:13,193 INFO [M:0;059551c538b7:39443 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.85 KB/27492, heapSize ~33.84 KB/34648, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 138ms, sequenceid=72, compaction requested=false 2024-11-09T05:55:13,194 INFO [M:0;059551c538b7:39443 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T05:55:13,194 DEBUG [M:0;059551c538b7:39443 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731131713055Disabling compacts and flushes for region at 1731131713055Disabling writes for close at 1731131713055Obtaining lock to block concurrent updates at 1731131713055Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731131713055Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27492, getHeapSize=34888, getOffHeapSize=0, getCellsCount=85 at 1731131713056 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731131713057 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731131713057Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731131713072 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731131713072Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731131713088 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731131713108 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731131713108Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731131713125 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731131713141 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731131713141Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@45a19cbf: reopening flushed file at 1731131713158 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4020550: reopening flushed file at 1731131713170 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1f80437f: reopening flushed file at 1731131713180 (+10 ms)Finished flush of dataSize ~26.85 KB/27492, heapSize ~33.84 KB/34648, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 138ms, sequenceid=72, compaction requested=false at 1731131713193 (+13 ms)Writing region close event to WAL at 1731131713194 (+1 ms)Closed at 1731131713194 2024-11-09T05:55:13,195 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T05:55:13,195 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T05:55:13,195 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T05:55:13,195 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T05:55:13,195 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T05:55:13,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741830_1006 (size=32695) 2024-11-09T05:55:13,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45033 is added to blk_1073741830_1006 (size=32695) 2024-11-09T05:55:13,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741830_1006 (size=32695) 2024-11-09T05:55:13,199 INFO [M:0;059551c538b7:39443 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-09T05:55:13,199 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-09T05:55:13,199 INFO [M:0;059551c538b7:39443 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39443 2024-11-09T05:55:13,199 INFO [M:0;059551c538b7:39443 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T05:55:13,309 INFO [M:0;059551c538b7:39443 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T05:55:13,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T05:55:13,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39443-0x1011e2996380000, quorum=127.0.0.1:58997, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T05:55:13,312 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@44672b71{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T05:55:13,312 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@36a9ca95{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T05:55:13,312 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T05:55:13,312 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@438bc7ce{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T05:55:13,312 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c77de1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/hadoop.log.dir/,STOPPED} 2024-11-09T05:55:13,314 WARN [BP-1438717172-172.17.0.2-1731131707697 heartbeating to localhost/127.0.0.1:46509 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-09T05:55:13,314 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-09T05:55:13,314 WARN [BP-1438717172-172.17.0.2-1731131707697 heartbeating to localhost/127.0.0.1:46509 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1438717172-172.17.0.2-1731131707697 (Datanode Uuid bf9af889-d903-4c20-b191-9e031457ed69) service to localhost/127.0.0.1:46509 2024-11-09T05:55:13,314 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-09T05:55:13,314 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/cluster_9b956a6b-9fbd-c821-5da5-7746b29aeb8b/data/data5/current/BP-1438717172-172.17.0.2-1731131707697 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T05:55:13,315 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/cluster_9b956a6b-9fbd-c821-5da5-7746b29aeb8b/data/data6/current/BP-1438717172-172.17.0.2-1731131707697 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T05:55:13,315 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-09T05:55:13,317 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@30bdc6f8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T05:55:13,317 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5547eae9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T05:55:13,317 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T05:55:13,317 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b0441b5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T05:55:13,317 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4e5afbc4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/hadoop.log.dir/,STOPPED} 2024-11-09T05:55:13,319 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-09T05:55:13,319 WARN [BP-1438717172-172.17.0.2-1731131707697 heartbeating to localhost/127.0.0.1:46509 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-09T05:55:13,319 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-09T05:55:13,319 WARN [BP-1438717172-172.17.0.2-1731131707697 heartbeating to localhost/127.0.0.1:46509 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1438717172-172.17.0.2-1731131707697 (Datanode Uuid cd798d89-dac4-4c4f-8157-ce99c2b505f5) service to localhost/127.0.0.1:46509 2024-11-09T05:55:13,319 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/cluster_9b956a6b-9fbd-c821-5da5-7746b29aeb8b/data/data3/current/BP-1438717172-172.17.0.2-1731131707697 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T05:55:13,320 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/cluster_9b956a6b-9fbd-c821-5da5-7746b29aeb8b/data/data4/current/BP-1438717172-172.17.0.2-1731131707697 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T05:55:13,320 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-09T05:55:13,323 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7eeef71e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T05:55:13,324 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@70feba44{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T05:55:13,324 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T05:55:13,324 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@20a0e688{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T05:55:13,324 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61d23bc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/hadoop.log.dir/,STOPPED} 2024-11-09T05:55:13,325 WARN [BP-1438717172-172.17.0.2-1731131707697 heartbeating to localhost/127.0.0.1:46509 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-09T05:55:13,325 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-09T05:55:13,325 WARN [BP-1438717172-172.17.0.2-1731131707697 heartbeating to localhost/127.0.0.1:46509 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1438717172-172.17.0.2-1731131707697 (Datanode Uuid bb31c275-e95e-4611-813b-ae8895b4b747) service to localhost/127.0.0.1:46509 2024-11-09T05:55:13,325 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-09T05:55:13,326 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/cluster_9b956a6b-9fbd-c821-5da5-7746b29aeb8b/data/data1/current/BP-1438717172-172.17.0.2-1731131707697 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T05:55:13,326 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/cluster_9b956a6b-9fbd-c821-5da5-7746b29aeb8b/data/data2/current/BP-1438717172-172.17.0.2-1731131707697 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T05:55:13,326 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-09T05:55:13,331 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3c97821d{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-09T05:55:13,332 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@544c0dd2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T05:55:13,332 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T05:55:13,332 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62802e4c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T05:55:13,332 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16eaa68d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/2e65931f-4177-184b-8bf5-785d8c6d6dd3/hadoop.log.dir/,STOPPED} 2024-11-09T05:55:13,338 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-09T05:55:13,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-09T05:55:13,369 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=151 (was 91) - Thread LEAK? -, OpenFileDescriptor=516 (was 443) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=77 (was 41) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=1527 (was 1730)