2024-11-10 15:52:23,249 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-11-10 15:52:23,262 main DEBUG Took 0.011183 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-10 15:52:23,263 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-10 15:52:23,263 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-10 15:52:23,264 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-10 15:52:23,265 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 15:52:23,279 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-10 15:52:23,292 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 15:52:23,294 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 15:52:23,295 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 15:52:23,295 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 15:52:23,296 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 15:52:23,297 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 15:52:23,298 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 15:52:23,298 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 15:52:23,299 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 15:52:23,300 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 15:52:23,301 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 15:52:23,301 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 15:52:23,302 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 15:52:23,303 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-10 15:52:23,304 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 15:52:23,304 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 15:52:23,305 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 15:52:23,305 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 15:52:23,306 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 15:52:23,306 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 15:52:23,307 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 15:52:23,307 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 15:52:23,308 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 15:52:23,309 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 15:52:23,309 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 15:52:23,310 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-10 15:52:23,312 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 15:52:23,313 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-10 15:52:23,316 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-10 15:52:23,317 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-10 15:52:23,318 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-10 15:52:23,319 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-10 15:52:23,331 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-10 15:52:23,335 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-10 15:52:23,337 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-10 15:52:23,338 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-10 15:52:23,338 main DEBUG createAppenders(={Console}) 2024-11-10 15:52:23,339 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-11-10 15:52:23,340 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-11-10 15:52:23,340 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-11-10 15:52:23,341 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-10 15:52:23,341 main DEBUG OutputStream closed 2024-11-10 15:52:23,342 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-10 15:52:23,342 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-10 15:52:23,343 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-11-10 15:52:23,426 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-10 15:52:23,429 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-10 15:52:23,430 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-10 15:52:23,431 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-10 15:52:23,432 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-10 15:52:23,433 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-10 15:52:23,433 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-10 15:52:23,434 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-10 15:52:23,434 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-10 15:52:23,434 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-10 15:52:23,435 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-10 15:52:23,435 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-10 15:52:23,436 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-10 15:52:23,436 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-10 15:52:23,436 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-10 15:52:23,436 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-10 15:52:23,437 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-10 15:52:23,437 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-10 15:52:23,439 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-10 15:52:23,440 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-11-10 15:52:23,440 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-10 15:52:23,440 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-11-10T15:52:23,453 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-11-10 15:52:23,456 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-10 15:52:23,456 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
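The Log4j bring-up above ends with HBaseClassTestRule registering the test class and its timeout ("Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins"). As a rough, hedged illustration of where that line comes from, the sketch below shows the usual HBase test-class skeleton that declares the rule; it is not the test's actual source, and the @Category values are an assumption for illustration only.

```java
// Illustrative sketch only, not the real TestHBaseWalOnEC source.
// HBaseClassTestRule is the JUnit @ClassRule that computes and logs the
// per-class timeout seen above; the category annotation is an assumed example.
package org.apache.hadoop.hbase.regionserver.wal;

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.junit.ClassRule;
import org.junit.experimental.categories.Category;

@Category({ RegionServerTests.class, LargeTests.class }) // assumed categories
public class TestHBaseWalOnEC {

  // The rule computes the per-class timeout that is logged at startup
  // (reported above as "timeout: 26 mins").
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestHBaseWalOnEC.class);
}
```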
2024-11-10T15:52:23,674 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e 2024-11-10T15:52:23,698 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/cluster_524e8f52-834f-1d4d-b88d-f07c412ed3e3, deleteOnExit=true 2024-11-10T15:52:23,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/test.cache.data in system properties and HBase conf 2024-11-10T15:52:23,700 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/hadoop.tmp.dir in system properties and HBase conf 2024-11-10T15:52:23,701 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/hadoop.log.dir in system properties and HBase conf 2024-11-10T15:52:23,701 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-10T15:52:23,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-10T15:52:23,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-10T15:52:23,782 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-10T15:52:23,871 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-10T15:52:23,876 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-10T15:52:23,876 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-10T15:52:23,877 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-10T15:52:23,877 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T15:52:23,878 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-10T15:52:23,878 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-10T15:52:23,879 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T15:52:23,879 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T15:52:23,880 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-10T15:52:23,880 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/nfs.dump.dir in system properties and HBase conf 2024-11-10T15:52:23,881 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/java.io.tmpdir in system properties and HBase conf 2024-11-10T15:52:23,881 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T15:52:23,881 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-10T15:52:23,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-10T15:52:24,955 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-10T15:52:25,027 INFO [Time-limited test {}] log.Log(170): Logging initialized @2612ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-10T15:52:25,094 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T15:52:25,152 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T15:52:25,174 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T15:52:25,174 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T15:52:25,176 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T15:52:25,188 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T15:52:25,191 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@760c69c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/hadoop.log.dir/,AVAILABLE} 2024-11-10T15:52:25,193 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ce709a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T15:52:25,366 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62d6efd9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/java.io.tmpdir/jetty-localhost-35371-hadoop-hdfs-3_4_1-tests_jar-_-any-17130302474272159072/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T15:52:25,374 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@353d35a1{HTTP/1.1, (http/1.1)}{localhost:35371} 2024-11-10T15:52:25,375 INFO [Time-limited test {}] server.Server(415): Started @2960ms 2024-11-10T15:52:25,933 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T15:52:25,940 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T15:52:25,941 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T15:52:25,941 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T15:52:25,941 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T15:52:25,942 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a5de9e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/hadoop.log.dir/,AVAILABLE} 2024-11-10T15:52:25,942 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69893329{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T15:52:26,040 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1b97a472{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/java.io.tmpdir/jetty-localhost-41441-hadoop-hdfs-3_4_1-tests_jar-_-any-15066650338809795235/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T15:52:26,041 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3722a29b{HTTP/1.1, (http/1.1)}{localhost:41441} 2024-11-10T15:52:26,042 INFO [Time-limited test {}] server.Server(415): Started @3627ms 2024-11-10T15:52:26,090 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T15:52:26,196 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T15:52:26,202 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T15:52:26,210 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T15:52:26,211 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T15:52:26,211 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T15:52:26,211 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@510fec09{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/hadoop.log.dir/,AVAILABLE} 2024-11-10T15:52:26,212 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40eb7053{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T15:52:26,312 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@353955e9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/java.io.tmpdir/jetty-localhost-34559-hadoop-hdfs-3_4_1-tests_jar-_-any-10584264338489112491/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T15:52:26,312 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11738cd8{HTTP/1.1, (http/1.1)}{localhost:34559} 2024-11-10T15:52:26,312 INFO [Time-limited test {}] server.Server(415): Started @3898ms 2024-11-10T15:52:26,315 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T15:52:26,349 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T15:52:26,354 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T15:52:26,356 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T15:52:26,356 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T15:52:26,357 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T15:52:26,359 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16cd567f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/hadoop.log.dir/,AVAILABLE} 2024-11-10T15:52:26,360 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5822645a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T15:52:26,465 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3114ae69{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/java.io.tmpdir/jetty-localhost-39859-hadoop-hdfs-3_4_1-tests_jar-_-any-14646114643011026747/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T15:52:26,465 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3c70a874{HTTP/1.1, (http/1.1)}{localhost:39859} 2024-11-10T15:52:26,466 INFO [Time-limited test {}] server.Server(415): Started @4051ms 2024-11-10T15:52:26,469 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
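Up to this point the log shows the mini HDFS layer coming up: HBaseTestingUtil points hbase.rootdir and a series of Hadoop/YARN/DFS properties at the per-test data directory, and Jetty then serves one namenode webapp (port 35371) plus three datanode webapps (ports 41441, 34559, 39859). The sketch below is a minimal, hand-written approximation of that bring-up using Hadoop's MiniDFSCluster test harness; it is not the code HBaseTestingUtil actually runs, and the base directory is a placeholder.

```java
// Hedged sketch (simplified stand-in for what HBaseTestingUtil drives): start a
// one-namenode, three-datanode MiniDFSCluster like the one whose web UIs come up above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // The test wires data/log dirs into its per-test folder; a placeholder is used here.
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/minidfs-sketch");

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(3)   // matches the three datanode webapps started above
        .build();
    cluster.waitActive();

    try (FileSystem fs = cluster.getFileSystem()) {
      System.out.println("NameNode up at " + fs.getUri());
    }
    cluster.shutdown();
  }
}
```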
2024-11-10T15:52:28,043 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/cluster_524e8f52-834f-1d4d-b88d-f07c412ed3e3/data/data3/current/BP-2013058530-172.17.0.3-1731253944422/current, will proceed with Du for space computation calculation, 2024-11-10T15:52:28,043 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/cluster_524e8f52-834f-1d4d-b88d-f07c412ed3e3/data/data2/current/BP-2013058530-172.17.0.3-1731253944422/current, will proceed with Du for space computation calculation, 2024-11-10T15:52:28,043 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/cluster_524e8f52-834f-1d4d-b88d-f07c412ed3e3/data/data4/current/BP-2013058530-172.17.0.3-1731253944422/current, will proceed with Du for space computation calculation, 2024-11-10T15:52:28,043 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/cluster_524e8f52-834f-1d4d-b88d-f07c412ed3e3/data/data1/current/BP-2013058530-172.17.0.3-1731253944422/current, will proceed with Du for space computation calculation, 2024-11-10T15:52:28,078 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T15:52:28,078 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T15:52:28,098 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/cluster_524e8f52-834f-1d4d-b88d-f07c412ed3e3/data/data5/current/BP-2013058530-172.17.0.3-1731253944422/current, will proceed with Du for space computation calculation, 2024-11-10T15:52:28,099 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/cluster_524e8f52-834f-1d4d-b88d-f07c412ed3e3/data/data6/current/BP-2013058530-172.17.0.3-1731253944422/current, will proceed with Du for space computation calculation, 2024-11-10T15:52:28,118 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-10T15:52:28,127 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x539cc35d671456a9 with lease ID 0x3c3b116880d02ed9: Processing first storage report for DS-78f4b957-ebfd-49f6-87dc-5842a71bcf00 from datanode DatanodeRegistration(127.0.0.1:46535, datanodeUuid=aa8cd665-04a1-43eb-becc-07fb607431fd, infoPort=43675, infoSecurePort=0, ipcPort=43323, storageInfo=lv=-57;cid=testClusterID;nsid=1751586293;c=1731253944422) 2024-11-10T15:52:28,128 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x539cc35d671456a9 with lease ID 0x3c3b116880d02ed9: from storage DS-78f4b957-ebfd-49f6-87dc-5842a71bcf00 node DatanodeRegistration(127.0.0.1:46535, datanodeUuid=aa8cd665-04a1-43eb-becc-07fb607431fd, infoPort=43675, infoSecurePort=0, ipcPort=43323, storageInfo=lv=-57;cid=testClusterID;nsid=1751586293;c=1731253944422), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-10T15:52:28,129 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfcc5d4e4a3171234 with lease ID 0x3c3b116880d02eda: Processing first storage report for DS-bb9ad83b-6361-4e02-8d97-81e52396ffe9 from datanode DatanodeRegistration(127.0.0.1:35019, datanodeUuid=3feff3af-413c-469c-878a-c8503eb3b8e6, infoPort=43657, infoSecurePort=0, ipcPort=34077, storageInfo=lv=-57;cid=testClusterID;nsid=1751586293;c=1731253944422) 2024-11-10T15:52:28,129 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfcc5d4e4a3171234 with lease ID 0x3c3b116880d02eda: from storage DS-bb9ad83b-6361-4e02-8d97-81e52396ffe9 node DatanodeRegistration(127.0.0.1:35019, datanodeUuid=3feff3af-413c-469c-878a-c8503eb3b8e6, infoPort=43657, infoSecurePort=0, ipcPort=34077, storageInfo=lv=-57;cid=testClusterID;nsid=1751586293;c=1731253944422), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T15:52:28,129 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1ec20a3b9f47055 with lease ID 0x3c3b116880d02edb: Processing first storage report for DS-c1d27c71-40fb-490d-aa4a-9d553e5d65ed from datanode DatanodeRegistration(127.0.0.1:36193, datanodeUuid=16d6c1da-7bdf-4cf0-89f9-b0b1c1e2d7b6, infoPort=45817, infoSecurePort=0, ipcPort=33671, storageInfo=lv=-57;cid=testClusterID;nsid=1751586293;c=1731253944422) 2024-11-10T15:52:28,129 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1ec20a3b9f47055 with lease ID 0x3c3b116880d02edb: from storage DS-c1d27c71-40fb-490d-aa4a-9d553e5d65ed node DatanodeRegistration(127.0.0.1:36193, datanodeUuid=16d6c1da-7bdf-4cf0-89f9-b0b1c1e2d7b6, infoPort=45817, infoSecurePort=0, ipcPort=33671, storageInfo=lv=-57;cid=testClusterID;nsid=1751586293;c=1731253944422), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T15:52:28,129 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x539cc35d671456a9 with lease ID 0x3c3b116880d02ed9: Processing first storage report for DS-9c86960d-f994-4f9b-b433-1bed09f2176b from datanode DatanodeRegistration(127.0.0.1:46535, datanodeUuid=aa8cd665-04a1-43eb-becc-07fb607431fd, infoPort=43675, infoSecurePort=0, ipcPort=43323, storageInfo=lv=-57;cid=testClusterID;nsid=1751586293;c=1731253944422) 2024-11-10T15:52:28,130 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x539cc35d671456a9 with lease ID 0x3c3b116880d02ed9: from storage DS-9c86960d-f994-4f9b-b433-1bed09f2176b node DatanodeRegistration(127.0.0.1:46535, datanodeUuid=aa8cd665-04a1-43eb-becc-07fb607431fd, infoPort=43675, infoSecurePort=0, ipcPort=43323, storageInfo=lv=-57;cid=testClusterID;nsid=1751586293;c=1731253944422), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T15:52:28,130 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfcc5d4e4a3171234 with lease ID 0x3c3b116880d02eda: Processing first storage report for DS-f101a36b-5e9f-4701-a657-66f8512c8e79 from datanode DatanodeRegistration(127.0.0.1:35019, datanodeUuid=3feff3af-413c-469c-878a-c8503eb3b8e6, infoPort=43657, infoSecurePort=0, ipcPort=34077, storageInfo=lv=-57;cid=testClusterID;nsid=1751586293;c=1731253944422) 2024-11-10T15:52:28,130 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfcc5d4e4a3171234 with lease ID 0x3c3b116880d02eda: from storage DS-f101a36b-5e9f-4701-a657-66f8512c8e79 node DatanodeRegistration(127.0.0.1:35019, datanodeUuid=3feff3af-413c-469c-878a-c8503eb3b8e6, infoPort=43657, infoSecurePort=0, ipcPort=34077, storageInfo=lv=-57;cid=testClusterID;nsid=1751586293;c=1731253944422), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-10T15:52:28,130 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1ec20a3b9f47055 with lease ID 0x3c3b116880d02edb: Processing first storage report for DS-1e1f6ab5-d212-48f4-8af0-d3bbbc7ccb93 from datanode DatanodeRegistration(127.0.0.1:36193, datanodeUuid=16d6c1da-7bdf-4cf0-89f9-b0b1c1e2d7b6, infoPort=45817, infoSecurePort=0, ipcPort=33671, storageInfo=lv=-57;cid=testClusterID;nsid=1751586293;c=1731253944422) 2024-11-10T15:52:28,130 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1ec20a3b9f47055 with lease ID 0x3c3b116880d02edb: from storage DS-1e1f6ab5-d212-48f4-8af0-d3bbbc7ccb93 node DatanodeRegistration(127.0.0.1:36193, datanodeUuid=16d6c1da-7bdf-4cf0-89f9-b0b1c1e2d7b6, infoPort=45817, infoSecurePort=0, ipcPort=33671, storageInfo=lv=-57;cid=testClusterID;nsid=1751586293;c=1731253944422), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T15:52:28,186 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e 2024-11-10T15:52:28,256 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable 2024-11-10T15:52:28,326 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=160, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=67, ProcessCount=11, AvailableMemoryMB=7507 2024-11-10T15:52:28,328 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-10T15:52:28,335 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-11-10T15:52:28,428 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/cluster_524e8f52-834f-1d4d-b88d-f07c412ed3e3/zookeeper_0, clientPort=55405, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/cluster_524e8f52-834f-1d4d-b88d-f07c412ed3e3/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/cluster_524e8f52-834f-1d4d-b88d-f07c412ed3e3/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-10T15:52:28,437 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55405 2024-11-10T15:52:28,447 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:52:28,449 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:52:28,525 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:52:28,525 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:52:28,568 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1448674904_22 at /127.0.0.1:44210 [Receiving block BP-2013058530-172.17.0.3-1731253944422:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:46535:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44210 dst: /127.0.0.1:46535 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:52:28,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46535 is added to blk_-9223372036854775792_1002 (size=7) 2024-11-10T15:52:28,989 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-10T15:52:29,003 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017 with version=8 2024-11-10T15:52:29,003 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/hbase-staging 2024-11-10T15:52:29,086 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-10T15:52:29,319 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c0771061be61:0 server-side Connection retries=45 2024-11-10T15:52:29,328 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:52:29,328 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T15:52:29,333 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T15:52:29,333 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:52:29,333 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T15:52:29,456 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-10T15:52:29,512 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class 
org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-10T15:52:29,521 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-10T15:52:29,525 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T15:52:29,549 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 95688 (auto-detected) 2024-11-10T15:52:29,550 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-11-10T15:52:29,567 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:40063 2024-11-10T15:52:29,586 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40063 connecting to ZooKeeper ensemble=127.0.0.1:55405 2024-11-10T15:52:29,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:400630x0, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T15:52:29,706 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40063-0x1012572cba70000 connected 2024-11-10T15:52:29,819 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:52:29,823 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:52:29,833 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:52:29,837 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017, hbase.cluster.distributed=false 2024-11-10T15:52:29,858 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T15:52:29,862 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40063 2024-11-10T15:52:29,863 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40063 2024-11-10T15:52:29,863 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40063 2024-11-10T15:52:29,863 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40063 2024-11-10T15:52:29,864 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40063 2024-11-10T15:52:29,963 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c0771061be61:0 server-side Connection retries=45 2024-11-10T15:52:29,964 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:52:29,964 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T15:52:29,965 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T15:52:29,965 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:52:29,965 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T15:52:29,967 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T15:52:29,971 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T15:52:29,972 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42259 2024-11-10T15:52:29,974 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42259 connecting to ZooKeeper ensemble=127.0.0.1:55405 2024-11-10T15:52:29,976 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:52:29,980 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:52:29,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:422590x0, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T15:52:29,997 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:422590x0, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:52:29,997 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42259-0x1012572cba70001 connected 2024-11-10T15:52:30,001 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T15:52:30,008 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T15:52:30,011 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42259-0x1012572cba70001, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T15:52:30,018 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42259-0x1012572cba70001, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T15:52:30,018 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42259 2024-11-10T15:52:30,019 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42259 
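Earlier in this log, DFSStripedOutputStream warns that it cannot allocate parity blocks 3 and 4 for policy RS-3-2-1024k and that block group <1> "failed to write 2 blocks" and is at high risk of losing data: RS-3-2 places 3 data plus 2 parity blocks per block group, so a full placement wants at least 5 datanodes, while this mini cluster starts only 3. As a hedged illustration of the erasure-coding setup involved (not code from the test), the sketch below enables and applies RS-3-2-1024k through the DistributedFileSystem API; the NameNode port (38825) and the /user/jenkins/test-data prefix appear earlier in this log, everything else is an assumption.

```java
// Hedged sketch, not taken from TestHBaseWalOnEC: enable the RS-3-2-1024k policy named in
// the warnings above and apply it to the test data directory.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class EcPolicySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path root = new Path("hdfs://localhost:38825/user/jenkins/test-data");
    try (DistributedFileSystem dfs = (DistributedFileSystem) root.getFileSystem(conf)) {
      dfs.enableErasureCodingPolicy("RS-3-2-1024k"); // built-in Hadoop 3 policy, disabled by default
      dfs.mkdirs(root);
      dfs.setErasureCodingPolicy(root, "RS-3-2-1024k");
      // The warning's own suggestion, 'hdfs ec -verifyClusterSetup', reports whether the
      // cluster has enough datanodes/racks for the enabled policies (RS-3-2 wants 5 nodes).
    }
  }
}
```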
2024-11-10T15:52:30,021 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42259 2024-11-10T15:52:30,021 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42259 2024-11-10T15:52:30,022 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42259 2024-11-10T15:52:30,037 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c0771061be61:0 server-side Connection retries=45 2024-11-10T15:52:30,037 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:52:30,037 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T15:52:30,038 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T15:52:30,038 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:52:30,038 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T15:52:30,038 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T15:52:30,038 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T15:52:30,039 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:45839 2024-11-10T15:52:30,041 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45839 connecting to ZooKeeper ensemble=127.0.0.1:55405 2024-11-10T15:52:30,042 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:52:30,046 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:52:30,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:458390x0, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T15:52:30,060 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45839-0x1012572cba70002 connected 2024-11-10T15:52:30,060 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45839-0x1012572cba70002, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:52:30,061 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, 
blockSize=64 KB 2024-11-10T15:52:30,063 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T15:52:30,064 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45839-0x1012572cba70002, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T15:52:30,066 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45839-0x1012572cba70002, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T15:52:30,071 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45839 2024-11-10T15:52:30,071 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45839 2024-11-10T15:52:30,072 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45839 2024-11-10T15:52:30,073 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45839 2024-11-10T15:52:30,073 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45839 2024-11-10T15:52:30,088 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c0771061be61:0 server-side Connection retries=45 2024-11-10T15:52:30,089 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:52:30,089 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T15:52:30,089 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T15:52:30,089 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:52:30,089 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T15:52:30,089 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T15:52:30,089 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T15:52:30,090 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34189 2024-11-10T15:52:30,092 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34189 connecting to ZooKeeper ensemble=127.0.0.1:55405 2024-11-10T15:52:30,093 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:52:30,095 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:52:30,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:341890x0, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T15:52:30,108 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:341890x0, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:52:30,108 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34189-0x1012572cba70003 connected 2024-11-10T15:52:30,108 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T15:52:30,109 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T15:52:30,110 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34189-0x1012572cba70003, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T15:52:30,113 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34189-0x1012572cba70003, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T15:52:30,114 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34189 2024-11-10T15:52:30,114 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34189 2024-11-10T15:52:30,114 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34189 2024-11-10T15:52:30,115 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34189 2024-11-10T15:52:30,115 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34189 2024-11-10T15:52:30,129 DEBUG [M:0;c0771061be61:40063 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c0771061be61:40063 2024-11-10T15:52:30,130 INFO [master/c0771061be61:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c0771061be61,40063,1731253949165 2024-11-10T15:52:30,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x1012572cba70002, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:52:30,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42259-0x1012572cba70001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:52:30,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
2024-11-10T15:52:30,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34189-0x1012572cba70003, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:52:30,146 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c0771061be61,40063,1731253949165 2024-11-10T15:52:30,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x1012572cba70002, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T15:52:30,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42259-0x1012572cba70001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T15:52:30,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:30,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34189-0x1012572cba70003, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T15:52:30,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42259-0x1012572cba70001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:30,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34189-0x1012572cba70003, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:30,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x1012572cba70002, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:30,188 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-10T15:52:30,189 INFO [master/c0771061be61:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c0771061be61,40063,1731253949165 from backup master directory 2024-11-10T15:52:30,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c0771061be61,40063,1731253949165 2024-11-10T15:52:30,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42259-0x1012572cba70001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:52:30,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34189-0x1012572cba70003, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-11-10T15:52:30,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x1012572cba70002, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:52:30,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:52:30,203 WARN [master/c0771061be61:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-10T15:52:30,204 INFO [master/c0771061be61:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c0771061be61,40063,1731253949165 2024-11-10T15:52:30,206 INFO [master/c0771061be61:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-10T15:52:30,208 INFO [master/c0771061be61:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-10T15:52:30,266 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/hbase.id] with ID: 48444733-03ac-471b-8244-229afff7a518 2024-11-10T15:52:30,266 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/.tmp/hbase.id 2024-11-10T15:52:30,273 WARN [master/c0771061be61:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:52:30,273 WARN [master/c0771061be61:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:52:30,276 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1448674904_22 at /127.0.0.1:44236 [Receiving block BP-2013058530-172.17.0.3-1731253944422:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:46535:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44236 dst: /127.0.0.1:46535 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:52:30,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46535 is added to blk_-9223372036854775776_1004 (size=42) 2024-11-10T15:52:30,283 WARN [master/c0771061be61:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-10T15:52:30,283 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/.tmp/hbase.id]:[hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/hbase.id] 2024-11-10T15:52:30,325 INFO [master/c0771061be61:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:52:30,329 INFO [master/c0771061be61:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-10T15:52:30,347 INFO [master/c0771061be61:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 16ms. 2024-11-10T15:52:30,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42259-0x1012572cba70001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:30,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x1012572cba70002, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:30,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34189-0x1012572cba70003, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:30,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:30,372 WARN [master/c0771061be61:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:52:30,372 WARN [master/c0771061be61:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). 
Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:52:30,376 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1448674904_22 at /127.0.0.1:52168 [Receiving block BP-2013058530-172.17.0.3-1731253944422:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:35019:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52168 dst: /127.0.0.1:35019 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:52:30,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_-9223372036854775760_1006 (size=196) 2024-11-10T15:52:30,382 WARN [master/c0771061be61:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-10T15:52:30,395 INFO [master/c0771061be61:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T15:52:30,397 INFO [master/c0771061be61:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-10T15:52:30,402 INFO [master/c0771061be61:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T15:52:30,430 WARN [master/c0771061be61:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:52:30,430 WARN [master/c0771061be61:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:52:30,433 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1448674904_22 at /127.0.0.1:59504 [Receiving block BP-2013058530-172.17.0.3-1731253944422:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:36193:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59504 dst: /127.0.0.1:36193 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:52:30,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36193 is added to blk_-9223372036854775744_1008 (size=1189) 2024-11-10T15:52:30,440 WARN [master/c0771061be61:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-10T15:52:30,460 INFO [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/MasterData/data/master/store 2024-11-10T15:52:30,477 WARN [master/c0771061be61:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:52:30,477 WARN [master/c0771061be61:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:52:30,480 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1448674904_22 at /127.0.0.1:59530 [Receiving block BP-2013058530-172.17.0.3-1731253944422:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36193:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59530 dst: /127.0.0.1:36193 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:52:30,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36193 is added to blk_-9223372036854775728_1010 (size=34) 2024-11-10T15:52:30,485 WARN [master/c0771061be61:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-10T15:52:30,489 INFO [master/c0771061be61:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-10T15:52:30,491 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T15:52:30,492 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T15:52:30,493 INFO [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T15:52:30,493 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T15:52:30,494 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-11-10T15:52:30,494 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T15:52:30,495 INFO [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T15:52:30,496 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731253950492Disabling compacts and flushes for region at 1731253950492Disabling writes for close at 1731253950494 (+2 ms)Writing region close event to WAL at 1731253950495 (+1 ms)Closed at 1731253950495 2024-11-10T15:52:30,498 WARN [master/c0771061be61:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/MasterData/data/master/store/.initializing 2024-11-10T15:52:30,498 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/MasterData/WALs/c0771061be61,40063,1731253949165 2024-11-10T15:52:30,506 INFO [master/c0771061be61:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-10T15:52:30,519 INFO [master/c0771061be61:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0771061be61%2C40063%2C1731253949165, suffix=, logDir=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/MasterData/WALs/c0771061be61,40063,1731253949165, archiveDir=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/MasterData/oldWALs, maxLogs=10 2024-11-10T15:52:30,546 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/MasterData/WALs/c0771061be61,40063,1731253949165/c0771061be61%2C40063%2C1731253949165.1731253950523, exclude list is [], retry=0 2024-11-10T15:52:30,564 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:414) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:473) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:468) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:52:30,565 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35019,DS-bb9ad83b-6361-4e02-8d97-81e52396ffe9,DISK] 2024-11-10T15:52:30,565 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46535,DS-78f4b957-ebfd-49f6-87dc-5842a71bcf00,DISK] 2024-11-10T15:52:30,565 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36193,DS-c1d27c71-40fb-490d-aa4a-9d553e5d65ed,DISK] 2024-11-10T15:52:30,569 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-10T15:52:30,604 INFO [master/c0771061be61:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/MasterData/WALs/c0771061be61,40063,1731253949165/c0771061be61%2C40063%2C1731253949165.1731253950523 2024-11-10T15:52:30,605 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43675:43675),(127.0.0.1/127.0.0.1:45817:45817),(127.0.0.1/127.0.0.1:43657:43657)] 2024-11-10T15:52:30,605 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-10T15:52:30,605 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T15:52:30,608 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:52:30,609 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:52:30,645 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:52:30,670 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-10T15:52:30,673 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:52:30,675 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:52:30,676 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:52:30,680 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-10T15:52:30,680 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:52:30,681 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T15:52:30,682 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:52:30,685 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-10T15:52:30,685 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:52:30,686 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T15:52:30,686 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:52:30,689 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-10T15:52:30,689 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:52:30,690 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T15:52:30,691 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:52:30,694 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:52:30,695 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:52:30,700 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:52:30,701 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:52:30,705 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-10T15:52:30,709 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:52:30,716 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T15:52:30,717 INFO [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63125065, jitterRate=-0.0593632310628891}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T15:52:30,724 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731253950619Initializing all the Stores at 1731253950621 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731253950621Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731253950622 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731253950622Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731253950622Cleaning up temporary data from old regions at 1731253950701 (+79 ms)Region opened successfully at 1731253950724 (+23 ms) 2024-11-10T15:52:30,726 INFO [master/c0771061be61:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-10T15:52:30,757 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2401138c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c0771061be61/172.17.0.3:0 2024-11-10T15:52:30,784 INFO [master/c0771061be61:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-10T15:52:30,793 INFO [master/c0771061be61:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-10T15:52:30,793 INFO [master/c0771061be61:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-10T15:52:30,795 INFO [master/c0771061be61:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-10T15:52:30,797 INFO [master/c0771061be61:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-10T15:52:30,801 INFO [master/c0771061be61:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-10T15:52:30,801 INFO [master/c0771061be61:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-10T15:52:30,823 INFO [master/c0771061be61:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-10T15:52:30,830 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-10T15:52:30,886 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-10T15:52:30,890 INFO [master/c0771061be61:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-10T15:52:30,893 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-10T15:52:30,901 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-10T15:52:30,903 INFO [master/c0771061be61:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-10T15:52:30,907 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-10T15:52:30,917 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-10T15:52:30,919 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-10T15:52:30,928 DEBUG [master/c0771061be61:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-10T15:52:30,949 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-10T15:52:30,959 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-10T15:52:30,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T15:52:30,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34189-0x1012572cba70003, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T15:52:30,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42259-0x1012572cba70001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T15:52:30,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x1012572cba70002, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T15:52:30,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34189-0x1012572cba70003, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:30,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42259-0x1012572cba70001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:30,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x1012572cba70002, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:30,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:30,981 INFO [master/c0771061be61:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c0771061be61,40063,1731253949165, sessionid=0x1012572cba70000, setting cluster-up flag (Was=false) 2024-11-10T15:52:31,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:31,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34189-0x1012572cba70003, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:31,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42259-0x1012572cba70001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-10T15:52:31,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x1012572cba70002, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:31,049 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-10T15:52:31,054 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c0771061be61,40063,1731253949165 2024-11-10T15:52:31,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42259-0x1012572cba70001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:31,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:31,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x1012572cba70002, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:31,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34189-0x1012572cba70003, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:31,111 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-10T15:52:31,114 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c0771061be61,40063,1731253949165 2024-11-10T15:52:31,120 INFO [master/c0771061be61:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-10T15:52:31,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36193 is added to blk_-9223372036854775788_1002 (size=7) 2024-11-10T15:52:31,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_-9223372036854775789_1002 (size=7) 2024-11-10T15:52:31,185 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-10T15:52:31,194 INFO [master/c0771061be61:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-10T15:52:31,200 INFO [master/c0771061be61:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, 
RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-10T15:52:31,205 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c0771061be61,40063,1731253949165 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-10T15:52:31,211 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c0771061be61:0, corePoolSize=5, maxPoolSize=5 2024-11-10T15:52:31,211 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c0771061be61:0, corePoolSize=5, maxPoolSize=5 2024-11-10T15:52:31,212 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c0771061be61:0, corePoolSize=5, maxPoolSize=5 2024-11-10T15:52:31,212 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c0771061be61:0, corePoolSize=5, maxPoolSize=5 2024-11-10T15:52:31,212 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c0771061be61:0, corePoolSize=10, maxPoolSize=10 2024-11-10T15:52:31,212 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,212 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c0771061be61:0, corePoolSize=2, maxPoolSize=2 2024-11-10T15:52:31,212 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,214 INFO [master/c0771061be61:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731253981214 2024-11-10T15:52:31,215 INFO [master/c0771061be61:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-10T15:52:31,216 INFO [master/c0771061be61:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-10T15:52:31,218 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T15:52:31,218 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-10T15:52:31,219 INFO [RS:0;c0771061be61:42259 {}] regionserver.HRegionServer(746): ClusterId : 48444733-03ac-471b-8244-229afff7a518 2024-11-10T15:52:31,219 INFO [RS:1;c0771061be61:45839 {}] regionserver.HRegionServer(746): ClusterId : 48444733-03ac-471b-8244-229afff7a518 2024-11-10T15:52:31,219 
INFO [RS:2;c0771061be61:34189 {}] regionserver.HRegionServer(746): ClusterId : 48444733-03ac-471b-8244-229afff7a518 2024-11-10T15:52:31,220 INFO [master/c0771061be61:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-10T15:52:31,220 INFO [master/c0771061be61:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-10T15:52:31,221 INFO [master/c0771061be61:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-10T15:52:31,221 INFO [master/c0771061be61:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-10T15:52:31,222 DEBUG [RS:1;c0771061be61:45839 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T15:52:31,222 DEBUG [RS:0;c0771061be61:42259 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T15:52:31,222 DEBUG [RS:2;c0771061be61:34189 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T15:52:31,222 INFO [master/c0771061be61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,224 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:52:31,224 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-10T15:52:31,226 INFO [master/c0771061be61:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-10T15:52:31,227 INFO [master/c0771061be61:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-10T15:52:31,228 INFO [master/c0771061be61:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-10T15:52:31,230 INFO [master/c0771061be61:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-10T15:52:31,230 INFO [master/c0771061be61:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-10T15:52:31,230 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:52:31,230 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:52:31,233 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c0771061be61:0:becomeActiveMaster-HFileCleaner.large.0-1731253951231,5,FailOnTimeoutGroup] 2024-11-10T15:52:31,234 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c0771061be61:0:becomeActiveMaster-HFileCleaner.small.0-1731253951233,5,FailOnTimeoutGroup] 2024-11-10T15:52:31,234 INFO [master/c0771061be61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,234 INFO [master/c0771061be61:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-10T15:52:31,234 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1448674904_22 at /127.0.0.1:52204 [Receiving block BP-2013058530-172.17.0.3-1731253944422:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:35019:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52204 dst: /127.0.0.1:35019 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:52:31,236 INFO [master/c0771061be61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,236 INFO [master/c0771061be61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_-9223372036854775712_1013 (size=1321) 2024-11-10T15:52:31,240 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-10T15:52:31,241 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-10T15:52:31,242 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017 2024-11-10T15:52:31,245 DEBUG [RS:1;c0771061be61:45839 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T15:52:31,245 DEBUG [RS:2;c0771061be61:34189 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T15:52:31,245 DEBUG [RS:0;c0771061be61:42259 {}] procedure.RegionServerProcedureManagerHost(45): 
Procedure flush-table-proc initialized 2024-11-10T15:52:31,246 DEBUG [RS:2;c0771061be61:34189 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T15:52:31,246 DEBUG [RS:1;c0771061be61:45839 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T15:52:31,246 DEBUG [RS:0;c0771061be61:42259 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T15:52:31,248 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:52:31,249 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:52:31,251 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1448674904_22 at /127.0.0.1:59564 [Receiving block BP-2013058530-172.17.0.3-1731253944422:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:36193:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59564 dst: /127.0.0.1:36193 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:52:31,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36193 is added to blk_-9223372036854775696_1015 (size=32) 2024-11-10T15:52:31,257 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-10T15:52:31,258 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T15:52:31,261 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T15:52:31,263 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T15:52:31,264 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:52:31,265 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:52:31,265 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T15:52:31,266 DEBUG [RS:1;c0771061be61:45839 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T15:52:31,266 DEBUG [RS:0;c0771061be61:42259 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T15:52:31,266 DEBUG [RS:2;c0771061be61:34189 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T15:52:31,267 DEBUG [RS:0;c0771061be61:42259 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a50c87f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c0771061be61/172.17.0.3:0 2024-11-10T15:52:31,267 DEBUG [RS:1;c0771061be61:45839 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e58c855, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c0771061be61/172.17.0.3:0 2024-11-10T15:52:31,267 DEBUG [RS:2;c0771061be61:34189 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57d8cc81, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, 
fallbackAllowed=true, bind address=c0771061be61/172.17.0.3:0 2024-11-10T15:52:31,268 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T15:52:31,268 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:52:31,269 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:52:31,270 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T15:52:31,273 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T15:52:31,273 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:52:31,274 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:52:31,275 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T15:52:31,278 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T15:52:31,278 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:52:31,280 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:52:31,280 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T15:52:31,282 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/hbase/meta/1588230740 2024-11-10T15:52:31,283 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/hbase/meta/1588230740 2024-11-10T15:52:31,283 DEBUG [RS:2;c0771061be61:34189 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;c0771061be61:34189 2024-11-10T15:52:31,286 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T15:52:31,286 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T15:52:31,287 DEBUG [RS:1;c0771061be61:45839 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;c0771061be61:45839 2024-11-10T15:52:31,288 DEBUG [RS:0;c0771061be61:42259 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c0771061be61:42259 2024-11-10T15:52:31,288 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-10T15:52:31,288 INFO [RS:2;c0771061be61:34189 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T15:52:31,288 INFO [RS:0;c0771061be61:42259 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T15:52:31,288 INFO [RS:1;c0771061be61:45839 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T15:52:31,288 INFO [RS:1;c0771061be61:45839 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T15:52:31,288 INFO [RS:0;c0771061be61:42259 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T15:52:31,288 INFO [RS:2;c0771061be61:34189 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T15:52:31,288 DEBUG [RS:1;c0771061be61:45839 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-10T15:52:31,288 DEBUG [RS:0;c0771061be61:42259 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-10T15:52:31,288 DEBUG [RS:2;c0771061be61:34189 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-10T15:52:31,291 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T15:52:31,291 INFO [RS:2;c0771061be61:34189 {}] regionserver.HRegionServer(2659): reportForDuty to master=c0771061be61,40063,1731253949165 with port=34189, startcode=1731253950088 2024-11-10T15:52:31,291 INFO [RS:0;c0771061be61:42259 {}] regionserver.HRegionServer(2659): reportForDuty to master=c0771061be61,40063,1731253949165 with port=42259, startcode=1731253949931 2024-11-10T15:52:31,291 INFO [RS:1;c0771061be61:45839 {}] regionserver.HRegionServer(2659): reportForDuty to master=c0771061be61,40063,1731253949165 with port=45839, startcode=1731253950036 2024-11-10T15:52:31,301 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T15:52:31,303 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72006238, jitterRate=0.07297655940055847}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T15:52:31,305 DEBUG [RS:1;c0771061be61:45839 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T15:52:31,305 DEBUG [RS:2;c0771061be61:34189 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T15:52:31,305 DEBUG [RS:0;c0771061be61:42259 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T15:52:31,308 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731253951258Initializing all the Stores at 1731253951260 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731253951260Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731253951261 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731253951261Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 
'true', BLOCKSIZE => '8192 B (8KB)'} at 1731253951261Cleaning up temporary data from old regions at 1731253951286 (+25 ms)Region opened successfully at 1731253951308 (+22 ms) 2024-11-10T15:52:31,308 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T15:52:31,308 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T15:52:31,309 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T15:52:31,309 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T15:52:31,309 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T15:52:31,310 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T15:52:31,310 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731253951308Disabling compacts and flushes for region at 1731253951308Disabling writes for close at 1731253951309 (+1 ms)Writing region close event to WAL at 1731253951310 (+1 ms)Closed at 1731253951310 2024-11-10T15:52:31,332 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T15:52:31,332 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-10T15:52:31,339 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50277, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T15:52:31,339 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35753, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T15:52:31,339 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56307, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T15:52:31,341 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-10T15:52:31,345 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40063 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c0771061be61,34189,1731253950088 2024-11-10T15:52:31,347 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40063 {}] master.ServerManager(517): Registering regionserver=c0771061be61,34189,1731253950088 2024-11-10T15:52:31,351 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T15:52:31,354 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-10T15:52:31,359 INFO 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40063 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c0771061be61,45839,1731253950036 2024-11-10T15:52:31,360 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40063 {}] master.ServerManager(517): Registering regionserver=c0771061be61,45839,1731253950036 2024-11-10T15:52:31,363 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40063 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c0771061be61,42259,1731253949931 2024-11-10T15:52:31,364 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40063 {}] master.ServerManager(517): Registering regionserver=c0771061be61,42259,1731253949931 2024-11-10T15:52:31,364 DEBUG [RS:1;c0771061be61:45839 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017 2024-11-10T15:52:31,364 DEBUG [RS:2;c0771061be61:34189 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017 2024-11-10T15:52:31,365 DEBUG [RS:1;c0771061be61:45839 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38825 2024-11-10T15:52:31,365 DEBUG [RS:2;c0771061be61:34189 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38825 2024-11-10T15:52:31,365 DEBUG [RS:1;c0771061be61:45839 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T15:52:31,365 DEBUG [RS:2;c0771061be61:34189 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T15:52:31,367 DEBUG [RS:0;c0771061be61:42259 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017 2024-11-10T15:52:31,367 DEBUG [RS:0;c0771061be61:42259 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38825 2024-11-10T15:52:31,367 DEBUG [RS:0;c0771061be61:42259 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T15:52:31,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T15:52:31,465 DEBUG [RS:1;c0771061be61:45839 {}] zookeeper.ZKUtil(111): regionserver:45839-0x1012572cba70002, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c0771061be61,45839,1731253950036 2024-11-10T15:52:31,465 DEBUG [RS:2;c0771061be61:34189 {}] zookeeper.ZKUtil(111): regionserver:34189-0x1012572cba70003, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c0771061be61,34189,1731253950088 2024-11-10T15:52:31,465 WARN [RS:2;c0771061be61:34189 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-10T15:52:31,465 WARN [RS:1;c0771061be61:45839 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-10T15:52:31,465 INFO [RS:1;c0771061be61:45839 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T15:52:31,465 INFO [RS:2;c0771061be61:34189 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T15:52:31,466 DEBUG [RS:2;c0771061be61:34189 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/WALs/c0771061be61,34189,1731253950088 2024-11-10T15:52:31,466 DEBUG [RS:1;c0771061be61:45839 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/WALs/c0771061be61,45839,1731253950036 2024-11-10T15:52:31,466 DEBUG [RS:0;c0771061be61:42259 {}] zookeeper.ZKUtil(111): regionserver:42259-0x1012572cba70001, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c0771061be61,42259,1731253949931 2024-11-10T15:52:31,466 WARN [RS:0;c0771061be61:42259 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-10T15:52:31,466 INFO [RS:0;c0771061be61:42259 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T15:52:31,466 DEBUG [RS:0;c0771061be61:42259 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/WALs/c0771061be61,42259,1731253949931 2024-11-10T15:52:31,467 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c0771061be61,42259,1731253949931] 2024-11-10T15:52:31,467 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c0771061be61,45839,1731253950036] 2024-11-10T15:52:31,467 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c0771061be61,34189,1731253950088] 2024-11-10T15:52:31,493 INFO [RS:1;c0771061be61:45839 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T15:52:31,493 INFO [RS:0;c0771061be61:42259 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T15:52:31,493 INFO [RS:2;c0771061be61:34189 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T15:52:31,505 WARN [c0771061be61:40063 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-10T15:52:31,511 INFO [RS:2;c0771061be61:34189 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T15:52:31,511 INFO [RS:0;c0771061be61:42259 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T15:52:31,511 INFO [RS:1;c0771061be61:45839 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T15:52:31,516 INFO [RS:2;c0771061be61:34189 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T15:52:31,516 INFO [RS:0;c0771061be61:42259 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T15:52:31,516 INFO [RS:1;c0771061be61:45839 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T15:52:31,517 INFO [RS:0;c0771061be61:42259 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,517 INFO [RS:1;c0771061be61:45839 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,517 INFO [RS:2;c0771061be61:34189 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,518 INFO [RS:2;c0771061be61:34189 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T15:52:31,518 INFO [RS:0;c0771061be61:42259 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T15:52:31,519 INFO [RS:1;c0771061be61:45839 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T15:52:31,525 INFO [RS:0;c0771061be61:42259 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T15:52:31,525 INFO [RS:2;c0771061be61:34189 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T15:52:31,525 INFO [RS:1;c0771061be61:45839 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T15:52:31,526 INFO [RS:0;c0771061be61:42259 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,526 INFO [RS:1;c0771061be61:45839 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,526 INFO [RS:2;c0771061be61:34189 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-10T15:52:31,527 DEBUG [RS:0;c0771061be61:42259 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,527 DEBUG [RS:1;c0771061be61:45839 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,527 DEBUG [RS:2;c0771061be61:34189 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,527 DEBUG [RS:0;c0771061be61:42259 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,527 DEBUG [RS:2;c0771061be61:34189 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,527 DEBUG [RS:1;c0771061be61:45839 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,527 DEBUG [RS:0;c0771061be61:42259 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,527 DEBUG [RS:2;c0771061be61:34189 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,527 DEBUG [RS:1;c0771061be61:45839 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,527 DEBUG [RS:0;c0771061be61:42259 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,527 DEBUG [RS:0;c0771061be61:42259 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,527 DEBUG [RS:2;c0771061be61:34189 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,527 DEBUG [RS:1;c0771061be61:45839 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,527 DEBUG [RS:0;c0771061be61:42259 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c0771061be61:0, corePoolSize=2, maxPoolSize=2 2024-11-10T15:52:31,527 DEBUG [RS:2;c0771061be61:34189 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,527 DEBUG [RS:1;c0771061be61:45839 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,527 DEBUG [RS:0;c0771061be61:42259 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,527 DEBUG [RS:1;c0771061be61:45839 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c0771061be61:0, corePoolSize=2, maxPoolSize=2 
2024-11-10T15:52:31,527 DEBUG [RS:2;c0771061be61:34189 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c0771061be61:0, corePoolSize=2, maxPoolSize=2 2024-11-10T15:52:31,527 DEBUG [RS:0;c0771061be61:42259 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,527 DEBUG [RS:0;c0771061be61:42259 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,527 DEBUG [RS:2;c0771061be61:34189 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,527 DEBUG [RS:1;c0771061be61:45839 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,528 DEBUG [RS:0;c0771061be61:42259 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,528 DEBUG [RS:2;c0771061be61:34189 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,528 DEBUG [RS:1;c0771061be61:45839 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,528 DEBUG [RS:0;c0771061be61:42259 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,528 DEBUG [RS:1;c0771061be61:45839 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,528 DEBUG [RS:2;c0771061be61:34189 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,528 DEBUG [RS:0;c0771061be61:42259 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,528 DEBUG [RS:2;c0771061be61:34189 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,528 DEBUG [RS:1;c0771061be61:45839 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,528 DEBUG [RS:0;c0771061be61:42259 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c0771061be61:0, corePoolSize=3, maxPoolSize=3 2024-11-10T15:52:31,528 DEBUG [RS:0;c0771061be61:42259 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0, corePoolSize=3, maxPoolSize=3 2024-11-10T15:52:31,528 DEBUG [RS:1;c0771061be61:45839 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,528 DEBUG [RS:2;c0771061be61:34189 {}] executor.ExecutorService(95): 
Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,528 DEBUG [RS:2;c0771061be61:34189 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,528 DEBUG [RS:1;c0771061be61:45839 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:31,528 DEBUG [RS:2;c0771061be61:34189 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c0771061be61:0, corePoolSize=3, maxPoolSize=3 2024-11-10T15:52:31,528 DEBUG [RS:1;c0771061be61:45839 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c0771061be61:0, corePoolSize=3, maxPoolSize=3 2024-11-10T15:52:31,529 DEBUG [RS:2;c0771061be61:34189 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0, corePoolSize=3, maxPoolSize=3 2024-11-10T15:52:31,529 DEBUG [RS:1;c0771061be61:45839 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0, corePoolSize=3, maxPoolSize=3 2024-11-10T15:52:31,535 INFO [RS:0;c0771061be61:42259 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,535 INFO [RS:2;c0771061be61:34189 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,535 INFO [RS:0;c0771061be61:42259 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,536 INFO [RS:2;c0771061be61:34189 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,536 INFO [RS:0;c0771061be61:42259 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,536 INFO [RS:2;c0771061be61:34189 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,536 INFO [RS:0;c0771061be61:42259 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,536 INFO [RS:2;c0771061be61:34189 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,536 INFO [RS:0;c0771061be61:42259 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,536 INFO [RS:2;c0771061be61:34189 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,536 INFO [RS:0;c0771061be61:42259 {}] hbase.ChoreService(168): Chore ScheduledChore name=c0771061be61,42259,1731253949931-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T15:52:31,536 INFO [RS:2;c0771061be61:34189 {}] hbase.ChoreService(168): Chore ScheduledChore name=c0771061be61,34189,1731253950088-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-11-10T15:52:31,536 INFO [RS:1;c0771061be61:45839 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,536 INFO [RS:1;c0771061be61:45839 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,536 INFO [RS:1;c0771061be61:45839 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,536 INFO [RS:1;c0771061be61:45839 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,537 INFO [RS:1;c0771061be61:45839 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,537 INFO [RS:1;c0771061be61:45839 {}] hbase.ChoreService(168): Chore ScheduledChore name=c0771061be61,45839,1731253950036-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T15:52:31,560 INFO [RS:0;c0771061be61:42259 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T15:52:31,560 INFO [RS:2;c0771061be61:34189 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T15:52:31,560 INFO [RS:1;c0771061be61:45839 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T15:52:31,562 INFO [RS:1;c0771061be61:45839 {}] hbase.ChoreService(168): Chore ScheduledChore name=c0771061be61,45839,1731253950036-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,562 INFO [RS:2;c0771061be61:34189 {}] hbase.ChoreService(168): Chore ScheduledChore name=c0771061be61,34189,1731253950088-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,562 INFO [RS:0;c0771061be61:42259 {}] hbase.ChoreService(168): Chore ScheduledChore name=c0771061be61,42259,1731253949931-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,562 INFO [RS:0;c0771061be61:42259 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,562 INFO [RS:1;c0771061be61:45839 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,562 INFO [RS:2;c0771061be61:34189 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,562 INFO [RS:1;c0771061be61:45839 {}] regionserver.Replication(171): c0771061be61,45839,1731253950036 started 2024-11-10T15:52:31,563 INFO [RS:0;c0771061be61:42259 {}] regionserver.Replication(171): c0771061be61,42259,1731253949931 started 2024-11-10T15:52:31,563 INFO [RS:2;c0771061be61:34189 {}] regionserver.Replication(171): c0771061be61,34189,1731253950088 started 2024-11-10T15:52:31,583 INFO [RS:1;c0771061be61:45839 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:31,583 INFO [RS:2;c0771061be61:34189 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-10T15:52:31,584 INFO [RS:2;c0771061be61:34189 {}] regionserver.HRegionServer(1482): Serving as c0771061be61,34189,1731253950088, RpcServer on c0771061be61/172.17.0.3:34189, sessionid=0x1012572cba70003 2024-11-10T15:52:31,584 INFO [RS:1;c0771061be61:45839 {}] regionserver.HRegionServer(1482): Serving as c0771061be61,45839,1731253950036, RpcServer on c0771061be61/172.17.0.3:45839, sessionid=0x1012572cba70002 2024-11-10T15:52:31,584 DEBUG [RS:1;c0771061be61:45839 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T15:52:31,584 DEBUG [RS:2;c0771061be61:34189 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T15:52:31,585 DEBUG [RS:1;c0771061be61:45839 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c0771061be61,45839,1731253950036 2024-11-10T15:52:31,585 DEBUG [RS:2;c0771061be61:34189 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c0771061be61,34189,1731253950088 2024-11-10T15:52:31,585 DEBUG [RS:1;c0771061be61:45839 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c0771061be61,45839,1731253950036' 2024-11-10T15:52:31,585 DEBUG [RS:2;c0771061be61:34189 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c0771061be61,34189,1731253950088' 2024-11-10T15:52:31,585 DEBUG [RS:1;c0771061be61:45839 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T15:52:31,585 DEBUG [RS:2;c0771061be61:34189 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T15:52:31,585 INFO [RS:0;c0771061be61:42259 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-10T15:52:31,585 INFO [RS:0;c0771061be61:42259 {}] regionserver.HRegionServer(1482): Serving as c0771061be61,42259,1731253949931, RpcServer on c0771061be61/172.17.0.3:42259, sessionid=0x1012572cba70001 2024-11-10T15:52:31,585 DEBUG [RS:0;c0771061be61:42259 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T15:52:31,585 DEBUG [RS:0;c0771061be61:42259 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c0771061be61,42259,1731253949931 2024-11-10T15:52:31,585 DEBUG [RS:0;c0771061be61:42259 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c0771061be61,42259,1731253949931' 2024-11-10T15:52:31,586 DEBUG [RS:0;c0771061be61:42259 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T15:52:31,586 DEBUG [RS:1;c0771061be61:45839 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T15:52:31,586 DEBUG [RS:2;c0771061be61:34189 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T15:52:31,586 DEBUG [RS:0;c0771061be61:42259 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T15:52:31,586 DEBUG [RS:1;c0771061be61:45839 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T15:52:31,586 DEBUG [RS:2;c0771061be61:34189 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T15:52:31,586 DEBUG [RS:1;c0771061be61:45839 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T15:52:31,586 DEBUG [RS:2;c0771061be61:34189 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T15:52:31,587 DEBUG [RS:0;c0771061be61:42259 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T15:52:31,587 DEBUG [RS:0;c0771061be61:42259 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T15:52:31,587 DEBUG [RS:1;c0771061be61:45839 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c0771061be61,45839,1731253950036 2024-11-10T15:52:31,587 DEBUG [RS:2;c0771061be61:34189 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c0771061be61,34189,1731253950088 2024-11-10T15:52:31,587 DEBUG [RS:0;c0771061be61:42259 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c0771061be61,42259,1731253949931 2024-11-10T15:52:31,587 DEBUG [RS:2;c0771061be61:34189 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c0771061be61,34189,1731253950088' 2024-11-10T15:52:31,587 DEBUG [RS:1;c0771061be61:45839 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c0771061be61,45839,1731253950036' 2024-11-10T15:52:31,587 DEBUG [RS:0;c0771061be61:42259 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c0771061be61,42259,1731253949931' 2024-11-10T15:52:31,587 DEBUG [RS:2;c0771061be61:34189 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T15:52:31,587 DEBUG [RS:1;c0771061be61:45839 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T15:52:31,587 DEBUG 
[RS:0;c0771061be61:42259 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T15:52:31,587 DEBUG [RS:2;c0771061be61:34189 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T15:52:31,587 DEBUG [RS:1;c0771061be61:45839 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T15:52:31,587 DEBUG [RS:0;c0771061be61:42259 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T15:52:31,588 DEBUG [RS:2;c0771061be61:34189 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T15:52:31,588 DEBUG [RS:1;c0771061be61:45839 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T15:52:31,588 DEBUG [RS:0;c0771061be61:42259 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T15:52:31,588 INFO [RS:2;c0771061be61:34189 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T15:52:31,588 INFO [RS:1;c0771061be61:45839 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T15:52:31,588 INFO [RS:0;c0771061be61:42259 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T15:52:31,588 INFO [RS:0;c0771061be61:42259 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-10T15:52:31,588 INFO [RS:1;c0771061be61:45839 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-10T15:52:31,588 INFO [RS:2;c0771061be61:34189 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
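The flush-table-proc and online-snapshot procedure members above each check an abort znode and then watch an acquired znode under /hbase. The sketch below only illustrates that znode layout with a plain ZooKeeper client, not HBase's internal ZKProcedureMemberRpcs; the quorum address 127.0.0.1:55405 is the one this log reports elsewhere, and the session timeout is arbitrary.

    import java.util.List;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ProcZNodeSketch {
      public static void main(String[] args) throws Exception {
        Watcher watcher = (WatchedEvent event) ->
            System.out.println("ZK event: " + event.getType() + " on " + event.getPath());

        // Connect to the test cluster's quorum (address taken from this log).
        ZooKeeper zk = new ZooKeeper("127.0.0.1:55405", 30_000, watcher);

        // The same nodes the procedure members inspect on startup.
        List<String> aborted  = zk.getChildren("/hbase/flush-table-proc/abort", false);
        List<String> acquired = zk.getChildren("/hbase/flush-table-proc/acquired", true); // leave a watch

        System.out.println("aborted procedures: " + aborted);
        System.out.println("pending procedures: " + acquired);
        zk.close();
      }
    }
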
2024-11-10T15:52:31,693 INFO [RS:1;c0771061be61:45839 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-10T15:52:31,693 INFO [RS:0;c0771061be61:42259 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-10T15:52:31,693 INFO [RS:2;c0771061be61:34189 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-10T15:52:31,696 INFO [RS:1;c0771061be61:45839 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0771061be61%2C45839%2C1731253950036, suffix=, logDir=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/WALs/c0771061be61,45839,1731253950036, archiveDir=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/oldWALs, maxLogs=32 2024-11-10T15:52:31,696 INFO [RS:2;c0771061be61:34189 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0771061be61%2C34189%2C1731253950088, suffix=, logDir=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/WALs/c0771061be61,34189,1731253950088, archiveDir=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/oldWALs, maxLogs=32 2024-11-10T15:52:31,696 INFO [RS:0;c0771061be61:42259 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0771061be61%2C42259%2C1731253949931, suffix=, logDir=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/WALs/c0771061be61,42259,1731253949931, archiveDir=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/oldWALs, maxLogs=32 2024-11-10T15:52:31,715 DEBUG [RS:1;c0771061be61:45839 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/WALs/c0771061be61,45839,1731253950036/c0771061be61%2C45839%2C1731253950036.1731253951701, exclude list is [], retry=0 2024-11-10T15:52:31,715 DEBUG [RS:2;c0771061be61:34189 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/WALs/c0771061be61,34189,1731253950088/c0771061be61%2C34189%2C1731253950088.1731253951701, exclude list is [], retry=0 2024-11-10T15:52:31,716 DEBUG [RS:0;c0771061be61:42259 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/WALs/c0771061be61,42259,1731253949931/c0771061be61%2C42259%2C1731253949931.1731253951701, exclude list is [], retry=0 2024-11-10T15:52:31,719 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35019,DS-bb9ad83b-6361-4e02-8d97-81e52396ffe9,DISK] 2024-11-10T15:52:31,719 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36193,DS-c1d27c71-40fb-490d-aa4a-9d553e5d65ed,DISK] 2024-11-10T15:52:31,719 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:46535,DS-78f4b957-ebfd-49f6-87dc-5842a71bcf00,DISK] 2024-11-10T15:52:31,721 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46535,DS-78f4b957-ebfd-49f6-87dc-5842a71bcf00,DISK] 2024-11-10T15:52:31,721 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35019,DS-bb9ad83b-6361-4e02-8d97-81e52396ffe9,DISK] 2024-11-10T15:52:31,721 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36193,DS-c1d27c71-40fb-490d-aa4a-9d553e5d65ed,DISK] 2024-11-10T15:52:31,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46535,DS-78f4b957-ebfd-49f6-87dc-5842a71bcf00,DISK] 2024-11-10T15:52:31,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35019,DS-bb9ad83b-6361-4e02-8d97-81e52396ffe9,DISK] 2024-11-10T15:52:31,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36193,DS-c1d27c71-40fb-490d-aa4a-9d553e5d65ed,DISK] 2024-11-10T15:52:31,750 INFO [RS:1;c0771061be61:45839 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/WALs/c0771061be61,45839,1731253950036/c0771061be61%2C45839%2C1731253950036.1731253951701 2024-11-10T15:52:31,750 INFO [RS:2;c0771061be61:34189 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/WALs/c0771061be61,34189,1731253950088/c0771061be61%2C34189%2C1731253950088.1731253951701 2024-11-10T15:52:31,751 DEBUG [RS:1;c0771061be61:45839 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43675:43675),(127.0.0.1/127.0.0.1:45817:45817),(127.0.0.1/127.0.0.1:43657:43657)] 2024-11-10T15:52:31,751 DEBUG [RS:2;c0771061be61:34189 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43675:43675),(127.0.0.1/127.0.0.1:43657:43657),(127.0.0.1/127.0.0.1:45817:45817)] 2024-11-10T15:52:31,751 INFO [RS:0;c0771061be61:42259 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/WALs/c0771061be61,42259,1731253949931/c0771061be61%2C42259%2C1731253949931.1731253951701 2024-11-10T15:52:31,751 DEBUG [RS:0;c0771061be61:42259 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43675:43675),(127.0.0.1/127.0.0.1:43657:43657),(127.0.0.1/127.0.0.1:45817:45817)] 2024-11-10T15:52:32,010 DEBUG [c0771061be61:40063 {}] assignment.AssignmentManager(2464): Processing 
assignQueue; systemServersCount=3, allServersCount=3 2024-11-10T15:52:32,020 DEBUG [c0771061be61:40063 {}] balancer.BalancerClusterState(204): Hosts are {c0771061be61=0} racks are {/default-rack=0} 2024-11-10T15:52:32,026 DEBUG [c0771061be61:40063 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-10T15:52:32,026 DEBUG [c0771061be61:40063 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-10T15:52:32,026 DEBUG [c0771061be61:40063 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-10T15:52:32,026 DEBUG [c0771061be61:40063 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-10T15:52:32,026 DEBUG [c0771061be61:40063 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-10T15:52:32,026 DEBUG [c0771061be61:40063 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-10T15:52:32,026 INFO [c0771061be61:40063 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-10T15:52:32,026 INFO [c0771061be61:40063 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-10T15:52:32,026 INFO [c0771061be61:40063 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-10T15:52:32,026 DEBUG [c0771061be61:40063 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-10T15:52:32,033 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c0771061be61,34189,1731253950088 2024-11-10T15:52:32,039 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c0771061be61,34189,1731253950088, state=OPENING 2024-11-10T15:52:32,180 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-10T15:52:32,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42259-0x1012572cba70001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:32,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x1012572cba70002, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:32,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34189-0x1012572cba70003, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:32,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:32,192 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:52:32,192 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:52:32,192 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:52:32,192 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:52:32,194 DEBUG 
[PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T15:52:32,196 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c0771061be61,34189,1731253950088}] 2024-11-10T15:52:32,378 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-10T15:52:32,380 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53285, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-10T15:52:32,392 INFO [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-10T15:52:32,392 INFO [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T15:52:32,393 INFO [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-10T15:52:32,396 INFO [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0771061be61%2C34189%2C1731253950088.meta, suffix=.meta, logDir=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/WALs/c0771061be61,34189,1731253950088, archiveDir=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/oldWALs, maxLogs=32 2024-11-10T15:52:32,413 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/WALs/c0771061be61,34189,1731253950088/c0771061be61%2C34189%2C1731253950088.meta.1731253952398.meta, exclude list is [], retry=0 2024-11-10T15:52:32,418 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46535,DS-78f4b957-ebfd-49f6-87dc-5842a71bcf00,DISK] 2024-11-10T15:52:32,418 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35019,DS-bb9ad83b-6361-4e02-8d97-81e52396ffe9,DISK] 2024-11-10T15:52:32,418 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36193,DS-c1d27c71-40fb-490d-aa4a-9d553e5d65ed,DISK] 2024-11-10T15:52:32,421 INFO [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/WALs/c0771061be61,34189,1731253950088/c0771061be61%2C34189%2C1731253950088.meta.1731253952398.meta 2024-11-10T15:52:32,421 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43675:43675),(127.0.0.1/127.0.0.1:43657:43657),(127.0.0.1/127.0.0.1:45817:45817)] 2024-11-10T15:52:32,421 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-10T15:52:32,423 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-10T15:52:32,425 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-10T15:52:32,429 INFO [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-10T15:52:32,433 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-10T15:52:32,434 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T15:52:32,434 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-10T15:52:32,434 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-10T15:52:32,437 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T15:52:32,439 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T15:52:32,439 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:52:32,440 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:52:32,440 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T15:52:32,442 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T15:52:32,442 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:52:32,443 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:52:32,443 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T15:52:32,444 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T15:52:32,445 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:52:32,445 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:52:32,445 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T15:52:32,447 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T15:52:32,447 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:52:32,448 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:52:32,448 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T15:52:32,449 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/hbase/meta/1588230740 2024-11-10T15:52:32,452 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/hbase/meta/1588230740 2024-11-10T15:52:32,454 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T15:52:32,454 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T15:52:32,455 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
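The CompactionConfiguration lines above report the effective per-family compaction settings for hbase:meta (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, minCompactSize 128 MB), and FlushLargeStoresPolicy notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is unset, so it falls back to the memstore flush size divided by the number of families (32.0 M here). A sketch of the configuration keys that feed those values follows; the property names are the usual hbase-site.xml keys and are assumed here rather than stated by the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Selection-policy knobs mirrored in the CompactionConfiguration log line.
        conf.setInt("hbase.hstore.compaction.min", 3);                  // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                 // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);           // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);   // off-peak ratio
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize

        // Per-column-family flush lower bound mentioned by FlushLargeStoresPolicy;
        // when unset, the policy uses memstore flush size / number of families instead.
        conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 32L * 1024 * 1024);

        System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
      }
    }
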
2024-11-10T15:52:32,458 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T15:52:32,459 INFO [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74348338, jitterRate=0.10787656903266907}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T15:52:32,459 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-10T15:52:32,461 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731253952434Writing region info on filesystem at 1731253952434Initializing all the Stores at 1731253952436 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731253952437 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731253952437Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731253952437Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731253952437Cleaning up temporary data from old regions at 1731253952454 (+17 ms)Running coprocessor post-open hooks at 1731253952459 (+5 ms)Region opened successfully at 1731253952460 (+1 ms) 2024-11-10T15:52:32,491 INFO [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731253952368 2024-11-10T15:52:32,513 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-10T15:52:32,514 INFO [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-10T15:52:32,516 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=c0771061be61,34189,1731253950088 2024-11-10T15:52:32,519 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c0771061be61,34189,1731253950088, state=OPEN 2024-11-10T15:52:32,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T15:52:32,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42259-0x1012572cba70001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T15:52:32,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x1012572cba70002, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T15:52:32,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34189-0x1012572cba70003, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T15:52:32,549 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:52:32,549 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:52:32,549 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:52:32,549 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:52:32,550 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c0771061be61,34189,1731253950088 2024-11-10T15:52:32,557 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-10T15:52:32,557 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c0771061be61,34189,1731253950088 in 354 msec 2024-11-10T15:52:32,565 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-10T15:52:32,565 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.2180 sec 2024-11-10T15:52:32,567 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T15:52:32,567 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-10T15:52:32,584 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T15:52:32,585 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=c0771061be61,34189,1731253950088, seqNum=-1] 2024-11-10T15:52:32,605 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T15:52:32,607 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35519, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T15:52:32,629 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.4790 sec 2024-11-10T15:52:32,630 INFO [master/c0771061be61:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731253952629, completionTime=-1 2024-11-10T15:52:32,632 INFO [master/c0771061be61:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-10T15:52:32,632 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-10T15:52:32,654 INFO [master/c0771061be61:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-10T15:52:32,654 INFO [master/c0771061be61:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731254012654 2024-11-10T15:52:32,654 INFO [master/c0771061be61:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731254072654 2024-11-10T15:52:32,654 INFO [master/c0771061be61:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 21 msec 2024-11-10T15:52:32,655 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-10T15:52:32,661 INFO [master/c0771061be61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0771061be61,40063,1731253949165-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:32,661 INFO [master/c0771061be61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0771061be61,40063,1731253949165-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:32,661 INFO [master/c0771061be61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0771061be61,40063,1731253949165-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:32,663 INFO [master/c0771061be61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c0771061be61:40063, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:32,663 INFO [master/c0771061be61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:32,663 INFO [master/c0771061be61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
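With hbase:meta OPEN and its location published under /hbase/meta-region-server, clients resolve it through the connection registry, which is what the "fetched meta region location" entries above show. A hedged client-side sketch of that lookup with the standard HBase client API; the server name in the comment is just the one this log happens to report.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
          // Prints something like: c0771061be61,34189,1731253950088
          System.out.println("hbase:meta is served by " + loc.getServerName());
        }
      }
    }
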
2024-11-10T15:52:32,670 DEBUG [master/c0771061be61:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-10T15:52:32,690 INFO [master/c0771061be61:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.486sec 2024-11-10T15:52:32,691 INFO [master/c0771061be61:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-10T15:52:32,692 INFO [master/c0771061be61:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-10T15:52:32,693 INFO [master/c0771061be61:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-10T15:52:32,693 INFO [master/c0771061be61:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-10T15:52:32,693 INFO [master/c0771061be61:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-10T15:52:32,694 INFO [master/c0771061be61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0771061be61,40063,1731253949165-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T15:52:32,694 INFO [master/c0771061be61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0771061be61,40063,1731253949165-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-10T15:52:32,699 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-10T15:52:32,700 INFO [master/c0771061be61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-10T15:52:32,700 INFO [master/c0771061be61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0771061be61,40063,1731253949165-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-10T15:52:32,728 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f922879, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T15:52:32,732 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-10T15:52:32,732 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-10T15:52:32,735 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c0771061be61,40063,-1 for getting cluster id 2024-11-10T15:52:32,737 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-10T15:52:32,744 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '48444733-03ac-471b-8244-229afff7a518' 2024-11-10T15:52:32,746 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-10T15:52:32,746 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "48444733-03ac-471b-8244-229afff7a518" 2024-11-10T15:52:32,746 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66d09650, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T15:52:32,747 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c0771061be61,40063,-1] 2024-11-10T15:52:32,749 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-10T15:52:32,750 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:52:32,751 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60790, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-10T15:52:32,754 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49703e48, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T15:52:32,754 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T15:52:32,760 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c0771061be61,34189,1731253950088, seqNum=-1] 2024-11-10T15:52:32,760 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T15:52:32,762 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39994, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T15:52:32,781 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=c0771061be61,40063,1731253949165 2024-11-10T15:52:32,785 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-10T15:52:32,789 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is c0771061be61,40063,1731253949165 2024-11-10T15:52:32,792 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5783941f 2024-11-10T15:52:32,793 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-10T15:52:32,795 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60794, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-10T15:52:32,800 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40063 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T15:52:32,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40063 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-10T15:52:32,809 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-10T15:52:32,811 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40063 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-10T15:52:32,812 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:52:32,815 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-10T15:52:32,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T15:52:32,824 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:52:32,824 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
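The parity-block warnings just above are consistent with the test setup: the RS-3-2-1024k erasure coding policy needs five datanodes (three data blocks plus two parity blocks), while this minicluster runs only three, so parity blocks 3 and 4 cannot be placed. As a rough companion to the 'hdfs ec -verifyClusterSetup' command the warning itself suggests, the sketch below inspects and sets the policy programmatically; the NameNode address, directory and policy name are taken from this log, the rest is the Hadoop 3 DistributedFileSystem API and should be treated as an assumption.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class EcPolicySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode address as reported in this log's WAL paths.
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:38825"), conf);

        Path dataDir = new Path("/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017");

        // Which EC policy is in effect for the test data directory?
        ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dataDir);
        System.out.println("policy = " + (policy == null ? "replication" : policy.getName()));

        // RS-3-2-1024k needs 3 data + 2 parity blocks, i.e. at least 5 datanodes;
        // with only 3 datanodes the parity blocks cannot be allocated, as warned above.
        dfs.setErasureCodingPolicy(dataDir, "RS-3-2-1024k");
      }
    }
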
2024-11-10T15:52:32,828 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1448674904_22 at /127.0.0.1:50264 [Receiving block BP-2013058530-172.17.0.3-1731253944422:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:46535:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50264 dst: /127.0.0.1:46535 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:52:32,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46535 is added to blk_-9223372036854775680_1021 (size=392) 2024-11-10T15:52:32,834 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-10T15:52:32,837 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 48c6f8475284e297b68cb9d3d45269b1, NAME => 'TestHBaseWalOnEC,,1731253952797.48c6f8475284e297b68cb9d3d45269b1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017 2024-11-10T15:52:32,843 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:52:32,843 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-10T15:52:32,849 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1448674904_22 at /127.0.0.1:53126 [Receiving block BP-2013058530-172.17.0.3-1731253944422:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:35019:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53126 dst: /127.0.0.1:35019 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:52:32,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_-9223372036854775664_1023 (size=51) 2024-11-10T15:52:32,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T15:52:33,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T15:52:33,257 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-10T15:52:33,258 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731253952797.48c6f8475284e297b68cb9d3d45269b1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T15:52:33,258 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 48c6f8475284e297b68cb9d3d45269b1, disabling compactions & flushes 2024-11-10T15:52:33,258 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731253952797.48c6f8475284e297b68cb9d3d45269b1. 2024-11-10T15:52:33,258 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731253952797.48c6f8475284e297b68cb9d3d45269b1. 2024-11-10T15:52:33,258 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731253952797.48c6f8475284e297b68cb9d3d45269b1. 
after waiting 0 ms 2024-11-10T15:52:33,258 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731253952797.48c6f8475284e297b68cb9d3d45269b1. 2024-11-10T15:52:33,258 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731253952797.48c6f8475284e297b68cb9d3d45269b1. 2024-11-10T15:52:33,258 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 48c6f8475284e297b68cb9d3d45269b1: Waiting for close lock at 1731253953258Disabling compacts and flushes for region at 1731253953258Disabling writes for close at 1731253953258Writing region close event to WAL at 1731253953258Closed at 1731253953258 2024-11-10T15:52:33,261 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-10T15:52:33,268 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731253952797.48c6f8475284e297b68cb9d3d45269b1.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731253953261"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731253953261"}]},"ts":"1731253953261"} 2024-11-10T15:52:33,272 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-10T15:52:33,274 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-10T15:52:33,276 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731253953274"}]},"ts":"1731253953274"} 2024-11-10T15:52:33,280 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-10T15:52:33,281 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {c0771061be61=0} racks are {/default-rack=0} 2024-11-10T15:52:33,282 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-10T15:52:33,282 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-10T15:52:33,282 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-10T15:52:33,282 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-10T15:52:33,282 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-10T15:52:33,282 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-10T15:52:33,282 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-10T15:52:33,282 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-10T15:52:33,282 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-10T15:52:33,282 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-10T15:52:33,284 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, 
region=48c6f8475284e297b68cb9d3d45269b1, ASSIGN}] 2024-11-10T15:52:33,286 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=48c6f8475284e297b68cb9d3d45269b1, ASSIGN 2024-11-10T15:52:33,288 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=48c6f8475284e297b68cb9d3d45269b1, ASSIGN; state=OFFLINE, location=c0771061be61,42259,1731253949931; forceNewPlan=false, retain=false 2024-11-10T15:52:33,444 INFO [c0771061be61:40063 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-10T15:52:33,445 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=48c6f8475284e297b68cb9d3d45269b1, regionState=OPENING, regionLocation=c0771061be61,42259,1731253949931 2024-11-10T15:52:33,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T15:52:33,451 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=48c6f8475284e297b68cb9d3d45269b1, ASSIGN because future has completed 2024-11-10T15:52:33,452 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 48c6f8475284e297b68cb9d3d45269b1, server=c0771061be61,42259,1731253949931}] 2024-11-10T15:52:33,607 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-10T15:52:33,612 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43105, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-10T15:52:33,623 INFO [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731253952797.48c6f8475284e297b68cb9d3d45269b1. 
2024-11-10T15:52:33,623 DEBUG [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 48c6f8475284e297b68cb9d3d45269b1, NAME => 'TestHBaseWalOnEC,,1731253952797.48c6f8475284e297b68cb9d3d45269b1.', STARTKEY => '', ENDKEY => ''} 2024-11-10T15:52:33,624 DEBUG [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 48c6f8475284e297b68cb9d3d45269b1 2024-11-10T15:52:33,624 DEBUG [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731253952797.48c6f8475284e297b68cb9d3d45269b1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T15:52:33,624 DEBUG [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 48c6f8475284e297b68cb9d3d45269b1 2024-11-10T15:52:33,624 DEBUG [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 48c6f8475284e297b68cb9d3d45269b1 2024-11-10T15:52:33,626 INFO [StoreOpener-48c6f8475284e297b68cb9d3d45269b1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 48c6f8475284e297b68cb9d3d45269b1 2024-11-10T15:52:33,628 INFO [StoreOpener-48c6f8475284e297b68cb9d3d45269b1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 48c6f8475284e297b68cb9d3d45269b1 columnFamilyName cf 2024-11-10T15:52:33,629 DEBUG [StoreOpener-48c6f8475284e297b68cb9d3d45269b1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:52:33,629 INFO [StoreOpener-48c6f8475284e297b68cb9d3d45269b1-1 {}] regionserver.HStore(327): Store=48c6f8475284e297b68cb9d3d45269b1/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T15:52:33,630 DEBUG [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 48c6f8475284e297b68cb9d3d45269b1 2024-11-10T15:52:33,631 DEBUG [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/default/TestHBaseWalOnEC/48c6f8475284e297b68cb9d3d45269b1 2024-11-10T15:52:33,632 DEBUG 
[RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/default/TestHBaseWalOnEC/48c6f8475284e297b68cb9d3d45269b1 2024-11-10T15:52:33,633 DEBUG [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 48c6f8475284e297b68cb9d3d45269b1 2024-11-10T15:52:33,633 DEBUG [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 48c6f8475284e297b68cb9d3d45269b1 2024-11-10T15:52:33,636 DEBUG [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 48c6f8475284e297b68cb9d3d45269b1 2024-11-10T15:52:33,641 DEBUG [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/default/TestHBaseWalOnEC/48c6f8475284e297b68cb9d3d45269b1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T15:52:33,641 INFO [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 48c6f8475284e297b68cb9d3d45269b1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59336499, jitterRate=-0.11581726372241974}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-10T15:52:33,641 DEBUG [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 48c6f8475284e297b68cb9d3d45269b1 2024-11-10T15:52:33,642 DEBUG [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 48c6f8475284e297b68cb9d3d45269b1: Running coprocessor pre-open hook at 1731253953624Writing region info on filesystem at 1731253953624Initializing all the Stores at 1731253953626 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731253953626Cleaning up temporary data from old regions at 1731253953633 (+7 ms)Running coprocessor post-open hooks at 1731253953641 (+8 ms)Region opened successfully at 1731253953642 (+1 ms) 2024-11-10T15:52:33,645 INFO [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731253952797.48c6f8475284e297b68cb9d3d45269b1., pid=6, masterSystemTime=1731253953607 2024-11-10T15:52:33,648 DEBUG [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731253952797.48c6f8475284e297b68cb9d3d45269b1. 2024-11-10T15:52:33,648 INFO [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731253952797.48c6f8475284e297b68cb9d3d45269b1. 
2024-11-10T15:52:33,649 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=48c6f8475284e297b68cb9d3d45269b1, regionState=OPEN, openSeqNum=2, regionLocation=c0771061be61,42259,1731253949931 2024-11-10T15:52:33,653 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 48c6f8475284e297b68cb9d3d45269b1, server=c0771061be61,42259,1731253949931 because future has completed 2024-11-10T15:52:33,660 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-10T15:52:33,662 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 48c6f8475284e297b68cb9d3d45269b1, server=c0771061be61,42259,1731253949931 in 204 msec 2024-11-10T15:52:33,665 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-10T15:52:33,665 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=48c6f8475284e297b68cb9d3d45269b1, ASSIGN in 376 msec 2024-11-10T15:52:33,666 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-10T15:52:33,667 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731253953666"}]},"ts":"1731253953666"} 2024-11-10T15:52:33,670 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-10T15:52:33,671 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-10T15:52:33,674 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 869 msec 2024-11-10T15:52:33,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T15:52:33,961 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-10T15:52:33,961 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-10T15:52:33,964 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-10T15:52:33,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-10T15:52:33,970 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-10T15:52:33,970 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
2024-11-10T15:52:33,979 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731253952797.48c6f8475284e297b68cb9d3d45269b1., hostname=c0771061be61,42259,1731253949931, seqNum=2] 2024-11-10T15:52:33,980 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T15:52:33,983 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59992, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T15:52:33,992 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40063 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestHBaseWalOnEC 2024-11-10T15:52:33,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40063 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-10T15:52:33,999 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-10T15:52:33,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-10T15:52:34,001 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-10T15:52:34,002 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-10T15:52:34,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-10T15:52:34,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_-9223372036854775773_1004 (size=42) 2024-11-10T15:52:34,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_-9223372036854775693_1015 (size=32) 2024-11-10T15:52:34,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36193 is added to blk_-9223372036854775772_1004 (size=42) 2024-11-10T15:52:34,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46535 is added to blk_-9223372036854775741_1008 (size=1189) 2024-11-10T15:52:34,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46535 is added to blk_-9223372036854775725_1010 (size=34) 2024-11-10T15:52:34,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_-9223372036854775740_1008 (size=1189) 2024-11-10T15:52:34,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46535 is added to blk_-9223372036854775692_1015 (size=32) 
2024-11-10T15:52:34,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_-9223372036854775724_1010 (size=34) 2024-11-10T15:52:34,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46535 is added to blk_-9223372036854775756_1006 (size=196) 2024-11-10T15:52:34,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36193 is added to blk_-9223372036854775757_1006 (size=196) 2024-11-10T15:52:34,167 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42259 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-10T15:52:34,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731253952797.48c6f8475284e297b68cb9d3d45269b1. 2024-11-10T15:52:34,174 INFO [RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 48c6f8475284e297b68cb9d3d45269b1 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-10T15:52:34,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/default/TestHBaseWalOnEC/48c6f8475284e297b68cb9d3d45269b1/.tmp/cf/d526fb6a52624268ae403571de2caee6 is 36, key is row/cf:cq/1731253953984/Put/seqid=0 2024-11-10T15:52:34,230 WARN [RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:52:34,230 WARN [RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:52:34,235 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-91167193_22 at /127.0.0.1:53204 [Receiving block BP-2013058530-172.17.0.3-1731253944422:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:35019:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53204 dst: /127.0.0.1:35019 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:52:34,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_-9223372036854775648_1025 (size=4787) 2024-11-10T15:52:34,239 WARN [RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-10T15:52:34,239 INFO [RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/default/TestHBaseWalOnEC/48c6f8475284e297b68cb9d3d45269b1/.tmp/cf/d526fb6a52624268ae403571de2caee6 2024-11-10T15:52:34,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/default/TestHBaseWalOnEC/48c6f8475284e297b68cb9d3d45269b1/.tmp/cf/d526fb6a52624268ae403571de2caee6 as hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/default/TestHBaseWalOnEC/48c6f8475284e297b68cb9d3d45269b1/cf/d526fb6a52624268ae403571de2caee6 2024-11-10T15:52:34,296 INFO [RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/default/TestHBaseWalOnEC/48c6f8475284e297b68cb9d3d45269b1/cf/d526fb6a52624268ae403571de2caee6, entries=1, sequenceid=5, filesize=4.7 K 2024-11-10T15:52:34,303 INFO [RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 48c6f8475284e297b68cb9d3d45269b1 in 130ms, sequenceid=5, compaction requested=false 2024-11-10T15:52:34,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-11-10T15:52:34,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 48c6f8475284e297b68cb9d3d45269b1: 2024-11-10T15:52:34,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731253952797.48c6f8475284e297b68cb9d3d45269b1. 
2024-11-10T15:52:34,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-10T15:52:34,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40063 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-10T15:52:34,314 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-10T15:52:34,314 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 309 msec 2024-11-10T15:52:34,317 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 322 msec 2024-11-10T15:52:34,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-10T15:52:34,318 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-10T15:52:34,331 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-10T15:52:34,331 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-10T15:52:34,332 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T15:52:34,336 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:52:34,337 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:52:34,337 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-10T15:52:34,337 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-10T15:52:34,337 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=523817283, stopped=false 2024-11-10T15:52:34,337 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c0771061be61,40063,1731253949165 2024-11-10T15:52:34,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42259-0x1012572cba70001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T15:52:34,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T15:52:34,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34189-0x1012572cba70003, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T15:52:34,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x1012572cba70002, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T15:52:34,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42259-0x1012572cba70001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:34,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40063-0x1012572cba70000, 
quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:34,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34189-0x1012572cba70003, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:34,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x1012572cba70002, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:34,392 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T15:52:34,393 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:52:34,393 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-10T15:52:34,393 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45839-0x1012572cba70002, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:52:34,393 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42259-0x1012572cba70001, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:52:34,393 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T15:52:34,394 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34189-0x1012572cba70003, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:52:34,394 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:52:34,394 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c0771061be61,42259,1731253949931' ***** 2024-11-10T15:52:34,395 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T15:52:34,395 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c0771061be61,45839,1731253950036' ***** 2024-11-10T15:52:34,395 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T15:52:34,395 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c0771061be61,34189,1731253950088' ***** 2024-11-10T15:52:34,395 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T15:52:34,395 INFO [RS:1;c0771061be61:45839 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T15:52:34,396 INFO [RS:2;c0771061be61:34189 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T15:52:34,396 INFO [RS:0;c0771061be61:42259 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T15:52:34,396 INFO [RS:1;c0771061be61:45839 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-11-10T15:52:34,396 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T15:52:34,396 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T15:52:34,396 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T15:52:34,396 INFO [RS:1;c0771061be61:45839 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-10T15:52:34,396 INFO [RS:2;c0771061be61:34189 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T15:52:34,396 INFO [RS:2;c0771061be61:34189 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-10T15:52:34,396 INFO [RS:1;c0771061be61:45839 {}] regionserver.HRegionServer(959): stopping server c0771061be61,45839,1731253950036 2024-11-10T15:52:34,396 INFO [RS:2;c0771061be61:34189 {}] regionserver.HRegionServer(959): stopping server c0771061be61,34189,1731253950088 2024-11-10T15:52:34,396 INFO [RS:1;c0771061be61:45839 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T15:52:34,397 INFO [RS:2;c0771061be61:34189 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T15:52:34,397 INFO [RS:2;c0771061be61:34189 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;c0771061be61:34189. 2024-11-10T15:52:34,397 INFO [RS:1;c0771061be61:45839 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;c0771061be61:45839. 2024-11-10T15:52:34,397 DEBUG [RS:2;c0771061be61:34189 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T15:52:34,397 DEBUG [RS:2;c0771061be61:34189 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:52:34,397 DEBUG [RS:1;c0771061be61:45839 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T15:52:34,397 DEBUG [RS:1;c0771061be61:45839 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:52:34,397 INFO [RS:2;c0771061be61:34189 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T15:52:34,397 INFO [RS:2;c0771061be61:34189 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T15:52:34,397 INFO [RS:2;c0771061be61:34189 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-10T15:52:34,397 INFO [RS:1;c0771061be61:45839 {}] regionserver.HRegionServer(976): stopping server c0771061be61,45839,1731253950036; all regions closed. 2024-11-10T15:52:34,398 INFO [RS:2;c0771061be61:34189 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-10T15:52:34,398 INFO [RS:0;c0771061be61:42259 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T15:52:34,398 INFO [RS:0;c0771061be61:42259 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-10T15:52:34,398 INFO [RS:2;c0771061be61:34189 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-10T15:52:34,398 INFO [RS:0;c0771061be61:42259 {}] regionserver.HRegionServer(3091): Received CLOSE for 48c6f8475284e297b68cb9d3d45269b1 2024-11-10T15:52:34,399 DEBUG [RS:2;c0771061be61:34189 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-10T15:52:34,399 INFO [RS:0;c0771061be61:42259 {}] regionserver.HRegionServer(959): stopping server c0771061be61,42259,1731253949931 2024-11-10T15:52:34,399 INFO [RS:0;c0771061be61:42259 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T15:52:34,399 INFO [RS:0;c0771061be61:42259 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c0771061be61:42259. 
2024-11-10T15:52:34,399 DEBUG [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T15:52:34,399 DEBUG [RS_CLOSE_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 48c6f8475284e297b68cb9d3d45269b1, disabling compactions & flushes 2024-11-10T15:52:34,399 DEBUG [RS:0;c0771061be61:42259 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T15:52:34,399 DEBUG [RS:2;c0771061be61:34189 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-10T15:52:34,399 INFO [RS_CLOSE_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731253952797.48c6f8475284e297b68cb9d3d45269b1. 2024-11-10T15:52:34,399 INFO [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T15:52:34,399 DEBUG [RS:0;c0771061be61:42259 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:52:34,399 DEBUG [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T15:52:34,399 DEBUG [RS_CLOSE_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731253952797.48c6f8475284e297b68cb9d3d45269b1. 2024-11-10T15:52:34,399 DEBUG [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T15:52:34,399 DEBUG [RS_CLOSE_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731253952797.48c6f8475284e297b68cb9d3d45269b1. 
after waiting 0 ms 2024-11-10T15:52:34,399 INFO [RS:0;c0771061be61:42259 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-10T15:52:34,399 DEBUG [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T15:52:34,399 DEBUG [RS:0;c0771061be61:42259 {}] regionserver.HRegionServer(1325): Online Regions={48c6f8475284e297b68cb9d3d45269b1=TestHBaseWalOnEC,,1731253952797.48c6f8475284e297b68cb9d3d45269b1.} 2024-11-10T15:52:34,399 DEBUG [RS_CLOSE_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731253952797.48c6f8475284e297b68cb9d3d45269b1. 2024-11-10T15:52:34,399 DEBUG [RS:0;c0771061be61:42259 {}] regionserver.HRegionServer(1351): Waiting on 48c6f8475284e297b68cb9d3d45269b1 2024-11-10T15:52:34,399 INFO [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-10T15:52:34,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_1073741827_1017 (size=93) 2024-11-10T15:52:34,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36193 is added to blk_1073741827_1017 (size=93) 2024-11-10T15:52:34,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46535 is added to blk_1073741827_1017 (size=93) 2024-11-10T15:52:34,411 DEBUG [RS:1;c0771061be61:45839 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/oldWALs 2024-11-10T15:52:34,411 INFO [RS:1;c0771061be61:45839 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL c0771061be61%2C45839%2C1731253950036:(num 1731253951701) 2024-11-10T15:52:34,411 DEBUG [RS:1;c0771061be61:45839 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:52:34,411 INFO [RS:1;c0771061be61:45839 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T15:52:34,411 INFO [RS:1;c0771061be61:45839 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T15:52:34,412 INFO [RS:1;c0771061be61:45839 {}] hbase.ChoreService(370): Chore service for: regionserver/c0771061be61:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-10T15:52:34,412 INFO [RS:1;c0771061be61:45839 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T15:52:34,412 INFO [regionserver/c0771061be61:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T15:52:34,412 INFO [RS:1;c0771061be61:45839 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T15:52:34,412 INFO [RS:1;c0771061be61:45839 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-10T15:52:34,412 INFO [RS:1;c0771061be61:45839 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T15:52:34,413 INFO [RS:1;c0771061be61:45839 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:45839 2024-11-10T15:52:34,417 DEBUG [RS_CLOSE_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/default/TestHBaseWalOnEC/48c6f8475284e297b68cb9d3d45269b1/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-10T15:52:34,419 INFO [RS_CLOSE_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731253952797.48c6f8475284e297b68cb9d3d45269b1. 2024-11-10T15:52:34,419 DEBUG [RS_CLOSE_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 48c6f8475284e297b68cb9d3d45269b1: Waiting for close lock at 1731253954399Running coprocessor pre-close hooks at 1731253954399Disabling compacts and flushes for region at 1731253954399Disabling writes for close at 1731253954399Writing region close event to WAL at 1731253954400 (+1 ms)Running coprocessor post-close hooks at 1731253954418 (+18 ms)Closed at 1731253954419 (+1 ms) 2024-11-10T15:52:34,420 DEBUG [RS_CLOSE_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731253952797.48c6f8475284e297b68cb9d3d45269b1. 2024-11-10T15:52:34,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T15:52:34,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x1012572cba70002, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c0771061be61,45839,1731253950036 2024-11-10T15:52:34,427 INFO [RS:1;c0771061be61:45839 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T15:52:34,428 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c0771061be61,45839,1731253950036] 2024-11-10T15:52:34,436 DEBUG [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/hbase/meta/1588230740/.tmp/info/6cf69993c1e84664b15593b309c5b2f2 is 153, key is TestHBaseWalOnEC,,1731253952797.48c6f8475284e297b68cb9d3d45269b1./info:regioninfo/1731253953649/Put/seqid=0 2024-11-10T15:52:34,439 WARN [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:52:34,439 WARN [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:52:34,440 INFO [regionserver/c0771061be61:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T15:52:34,440 INFO [regionserver/c0771061be61:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T15:52:34,441 INFO [regionserver/c0771061be61:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T15:52:34,443 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1743156314_22 at /127.0.0.1:55312 [Receiving block BP-2013058530-172.17.0.3-1731253944422:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:36193:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55312 dst: /127.0.0.1:36193 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:52:34,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36193 is added to blk_-9223372036854775632_1027 (size=6637) 2024-11-10T15:52:34,448 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c0771061be61,45839,1731253950036 already deleted, retry=false 2024-11-10T15:52:34,448 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c0771061be61,45839,1731253950036 expired; onlineServers=2 2024-11-10T15:52:34,448 WARN [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
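[Annotation on the RS-3-2-1024k warnings above] That policy name denotes Reed-Solomon striping with 3 data blocks plus 2 parity blocks per block group (1024k cell size), so placing a complete group needs at least 5 datanodes, while this minicluster runs only 3 (127.0.0.1:35019, 127.0.0.1:36193, 127.0.0.1:46535). The parity blocks at indices 3 and 4 therefore cannot be allocated, each flushed file is written as a degraded block group ("failed to write 2 blocks"), and the surrounding DataXceiver "Premature EOF from inputStream" errors are consistent with the client closing striped streamers early for these small flush files. The Java sketch below is a minimal, illustrative way to check that precondition with standard HDFS client APIs; it is not the `hdfs ec -verifyClusterSetup` tool the warning recommends, and the path it inspects is a placeholder, not taken from this test.

```java
// Minimal sketch, not the test's code: assumes fs.defaultFS points at an HDFS
// cluster and that the inspected path (placeholder below) carries an EC policy.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

public class EcPlacementCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      Path dir = new Path("/hbase"); // placeholder path, adjust for the cluster in question
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
      if (policy == null) {
        System.out.println(dir + " uses plain replication, no EC policy set");
        return;
      }
      // For RS-3-2-1024k: 3 data units + 2 parity units = 5 required placements.
      int required = policy.getNumDataUnits() + policy.getNumParityUnits();
      int liveDataNodes = dfs.getDataNodeStats(DatanodeReportType.LIVE).length;
      System.out.printf("policy=%s requires %d datanodes, %d live%n",
          policy.getName(), required, liveDataNodes);
      if (liveDataNodes < required) {
        // Same condition behind the "Cannot allocate parity block" /
        // "failed to write 2 blocks" warnings during striped writes in this log.
        System.out.println("Cluster cannot place a full block group for this policy");
      }
    }
  }
}
```

Against a 3-datanode cluster with RS-3-2-1024k in effect, this would report 5 required placements versus 3 live datanodes, matching the warnings in this log.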
2024-11-10T15:52:34,449 INFO [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/hbase/meta/1588230740/.tmp/info/6cf69993c1e84664b15593b309c5b2f2 2024-11-10T15:52:34,477 DEBUG [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/hbase/meta/1588230740/.tmp/ns/bf23c7b82e6140d385bc3dfab4ba685c is 43, key is default/ns:d/1731253952613/Put/seqid=0 2024-11-10T15:52:34,479 WARN [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:52:34,479 WARN [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:52:34,483 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1743156314_22 at /127.0.0.1:50322 [Receiving block BP-2013058530-172.17.0.3-1731253944422:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:46535:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50322 dst: /127.0.0.1:46535 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:52:34,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46535 is added to blk_-9223372036854775616_1029 (size=5153) 2024-11-10T15:52:34,487 WARN [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-10T15:52:34,487 INFO [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/hbase/meta/1588230740/.tmp/ns/bf23c7b82e6140d385bc3dfab4ba685c 2024-11-10T15:52:34,514 DEBUG [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/hbase/meta/1588230740/.tmp/table/c624ce74dc6e45cbadb4770449ab627e is 52, key is TestHBaseWalOnEC/table:state/1731253953666/Put/seqid=0 2024-11-10T15:52:34,516 WARN [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:52:34,516 WARN [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:52:34,519 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1743156314_22 at /127.0.0.1:53230 [Receiving block BP-2013058530-172.17.0.3-1731253944422:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:35019:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53230 dst: /127.0.0.1:35019 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:52:34,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_-9223372036854775600_1031 (size=5249) 2024-11-10T15:52:34,523 WARN [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. 
It's at high risk of losing data. 2024-11-10T15:52:34,523 INFO [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/hbase/meta/1588230740/.tmp/table/c624ce74dc6e45cbadb4770449ab627e 2024-11-10T15:52:34,534 DEBUG [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/hbase/meta/1588230740/.tmp/info/6cf69993c1e84664b15593b309c5b2f2 as hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/hbase/meta/1588230740/info/6cf69993c1e84664b15593b309c5b2f2 2024-11-10T15:52:34,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x1012572cba70002, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:52:34,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x1012572cba70002, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:52:34,538 INFO [RS:1;c0771061be61:45839 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T15:52:34,539 INFO [RS:1;c0771061be61:45839 {}] regionserver.HRegionServer(1031): Exiting; stopping=c0771061be61,45839,1731253950036; zookeeper connection closed. 2024-11-10T15:52:34,539 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@10be4697 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@10be4697 2024-11-10T15:52:34,542 INFO [regionserver/c0771061be61:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-10T15:52:34,542 INFO [regionserver/c0771061be61:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-10T15:52:34,545 INFO [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/hbase/meta/1588230740/info/6cf69993c1e84664b15593b309c5b2f2, entries=10, sequenceid=11, filesize=6.5 K 2024-11-10T15:52:34,546 DEBUG [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/hbase/meta/1588230740/.tmp/ns/bf23c7b82e6140d385bc3dfab4ba685c as hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/hbase/meta/1588230740/ns/bf23c7b82e6140d385bc3dfab4ba685c 2024-11-10T15:52:34,551 INFO [regionserver/c0771061be61:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-10T15:52:34,551 INFO [regionserver/c0771061be61:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-10T15:52:34,555 INFO [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/hbase/meta/1588230740/ns/bf23c7b82e6140d385bc3dfab4ba685c, entries=2, sequenceid=11, filesize=5.0 K 2024-11-10T15:52:34,557 DEBUG 
[RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/hbase/meta/1588230740/.tmp/table/c624ce74dc6e45cbadb4770449ab627e as hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/hbase/meta/1588230740/table/c624ce74dc6e45cbadb4770449ab627e 2024-11-10T15:52:34,566 INFO [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/hbase/meta/1588230740/table/c624ce74dc6e45cbadb4770449ab627e, entries=2, sequenceid=11, filesize=5.1 K 2024-11-10T15:52:34,567 INFO [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 168ms, sequenceid=11, compaction requested=false 2024-11-10T15:52:34,567 DEBUG [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-10T15:52:34,576 DEBUG [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-10T15:52:34,577 DEBUG [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T15:52:34,577 INFO [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T15:52:34,577 DEBUG [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731253954398Running coprocessor pre-close hooks at 1731253954399 (+1 ms)Disabling compacts and flushes for region at 1731253954399Disabling writes for close at 1731253954399Obtaining lock to block concurrent updates at 1731253954399Preparing flush snapshotting stores in 1588230740 at 1731253954399Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731253954400 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731253954402 (+2 ms)Flushing 1588230740/info: creating writer at 1731253954402Flushing 1588230740/info: appending metadata at 1731253954431 (+29 ms)Flushing 1588230740/info: closing flushed file at 1731253954431Flushing 1588230740/ns: creating writer at 1731253954460 (+29 ms)Flushing 1588230740/ns: appending metadata at 1731253954476 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731253954476Flushing 1588230740/table: creating writer at 1731253954495 (+19 ms)Flushing 1588230740/table: appending metadata at 1731253954513 (+18 ms)Flushing 1588230740/table: closing flushed file at 1731253954513Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@29303b03: reopening flushed file at 1731253954533 (+20 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@75449190: reopening flushed file at 1731253954545 (+12 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@133b26ef: reopening flushed file at 1731253954555 (+10 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 168ms, sequenceid=11, compaction requested=false at 1731253954567 (+12 ms)Writing region close event to WAL at 1731253954569 (+2 ms)Running coprocessor post-close hooks at 1731253954576 (+7 ms)Closed at 1731253954577 (+1 ms) 2024-11-10T15:52:34,577 DEBUG [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-10T15:52:34,599 INFO [RS:2;c0771061be61:34189 {}] regionserver.HRegionServer(976): stopping server c0771061be61,34189,1731253950088; all regions closed. 2024-11-10T15:52:34,600 INFO [RS:0;c0771061be61:42259 {}] regionserver.HRegionServer(976): stopping server c0771061be61,42259,1731253949931; all regions closed. 2024-11-10T15:52:34,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36193 is added to blk_1073741829_1019 (size=2751) 2024-11-10T15:52:34,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_1073741828_1018 (size=1298) 2024-11-10T15:52:34,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_1073741829_1019 (size=2751) 2024-11-10T15:52:34,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46535 is added to blk_1073741829_1019 (size=2751) 2024-11-10T15:52:34,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46535 is added to blk_1073741828_1018 (size=1298) 2024-11-10T15:52:34,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36193 is added to blk_1073741828_1018 (size=1298) 2024-11-10T15:52:34,606 DEBUG [RS:0;c0771061be61:42259 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/oldWALs 2024-11-10T15:52:34,606 INFO [RS:0;c0771061be61:42259 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL c0771061be61%2C42259%2C1731253949931:(num 1731253951701) 2024-11-10T15:52:34,606 DEBUG [RS:0;c0771061be61:42259 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:52:34,606 INFO [RS:0;c0771061be61:42259 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T15:52:34,606 DEBUG [RS:2;c0771061be61:34189 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/oldWALs 2024-11-10T15:52:34,606 INFO [RS:2;c0771061be61:34189 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL c0771061be61%2C34189%2C1731253950088.meta:.meta(num 1731253952398) 2024-11-10T15:52:34,607 INFO [RS:0;c0771061be61:42259 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T15:52:34,607 INFO [RS:0;c0771061be61:42259 {}] hbase.ChoreService(370): Chore service for: regionserver/c0771061be61:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-10T15:52:34,607 INFO [RS:0;c0771061be61:42259 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-10T15:52:34,607 INFO [regionserver/c0771061be61:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T15:52:34,607 INFO [RS:0;c0771061be61:42259 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T15:52:34,607 INFO [RS:0;c0771061be61:42259 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-10T15:52:34,607 INFO [RS:0;c0771061be61:42259 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T15:52:34,607 INFO [RS:0;c0771061be61:42259 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42259 2024-11-10T15:52:34,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_1073741826_1016 (size=93) 2024-11-10T15:52:34,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36193 is added to blk_1073741826_1016 (size=93) 2024-11-10T15:52:34,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46535 is added to blk_1073741826_1016 (size=93) 2024-11-10T15:52:34,614 DEBUG [RS:2;c0771061be61:34189 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/oldWALs 2024-11-10T15:52:34,614 INFO [RS:2;c0771061be61:34189 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL c0771061be61%2C34189%2C1731253950088:(num 1731253951701) 2024-11-10T15:52:34,614 DEBUG [RS:2;c0771061be61:34189 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:52:34,614 INFO [RS:2;c0771061be61:34189 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T15:52:34,614 INFO [RS:2;c0771061be61:34189 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T15:52:34,615 INFO [RS:2;c0771061be61:34189 {}] hbase.ChoreService(370): Chore service for: regionserver/c0771061be61:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-10T15:52:34,615 INFO [RS:2;c0771061be61:34189 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T15:52:34,615 INFO [regionserver/c0771061be61:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-10T15:52:34,615 INFO [RS:2;c0771061be61:34189 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34189 2024-11-10T15:52:34,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42259-0x1012572cba70001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c0771061be61,42259,1731253949931 2024-11-10T15:52:34,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T15:52:34,616 INFO [RS:0;c0771061be61:42259 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T15:52:34,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34189-0x1012572cba70003, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c0771061be61,34189,1731253950088 2024-11-10T15:52:34,627 INFO [RS:2;c0771061be61:34189 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T15:52:34,638 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c0771061be61,42259,1731253949931] 2024-11-10T15:52:34,659 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c0771061be61,42259,1731253949931 already deleted, retry=false 2024-11-10T15:52:34,659 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c0771061be61,42259,1731253949931 expired; onlineServers=1 2024-11-10T15:52:34,659 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c0771061be61,34189,1731253950088] 2024-11-10T15:52:34,669 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c0771061be61,34189,1731253950088 already deleted, retry=false 2024-11-10T15:52:34,669 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c0771061be61,34189,1731253950088 expired; onlineServers=0 2024-11-10T15:52:34,669 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c0771061be61,40063,1731253949165' ***** 2024-11-10T15:52:34,669 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-10T15:52:34,670 INFO [M:0;c0771061be61:40063 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T15:52:34,670 INFO [M:0;c0771061be61:40063 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T15:52:34,670 DEBUG [M:0;c0771061be61:40063 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-10T15:52:34,670 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-10T15:52:34,670 DEBUG [M:0;c0771061be61:40063 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-10T15:52:34,670 DEBUG [master/c0771061be61:0:becomeActiveMaster-HFileCleaner.small.0-1731253951233 {}] cleaner.HFileCleaner(306): Exit Thread[master/c0771061be61:0:becomeActiveMaster-HFileCleaner.small.0-1731253951233,5,FailOnTimeoutGroup] 2024-11-10T15:52:34,670 DEBUG [master/c0771061be61:0:becomeActiveMaster-HFileCleaner.large.0-1731253951231 {}] cleaner.HFileCleaner(306): Exit Thread[master/c0771061be61:0:becomeActiveMaster-HFileCleaner.large.0-1731253951231,5,FailOnTimeoutGroup] 2024-11-10T15:52:34,671 INFO [M:0;c0771061be61:40063 {}] hbase.ChoreService(370): Chore service for: master/c0771061be61:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-10T15:52:34,671 INFO [M:0;c0771061be61:40063 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T15:52:34,671 DEBUG [M:0;c0771061be61:40063 {}] master.HMaster(1795): Stopping service threads 2024-11-10T15:52:34,671 INFO [M:0;c0771061be61:40063 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-10T15:52:34,672 INFO [M:0;c0771061be61:40063 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T15:52:34,673 INFO [M:0;c0771061be61:40063 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-10T15:52:34,673 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-10T15:52:34,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-10T15:52:34,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:34,681 DEBUG [M:0;c0771061be61:40063 {}] zookeeper.ZKUtil(347): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-10T15:52:34,681 WARN [M:0;c0771061be61:40063 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-10T15:52:34,682 INFO [M:0;c0771061be61:40063 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/.lastflushedseqids 2024-11-10T15:52:34,691 WARN [M:0;c0771061be61:40063 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:52:34,692 WARN [M:0;c0771061be61:40063 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-10T15:52:34,694 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1448674904_22 at /127.0.0.1:55340 [Receiving block BP-2013058530-172.17.0.3-1731253944422:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:36193:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55340 dst: /127.0.0.1:36193 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:52:34,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36193 is added to blk_-9223372036854775584_1033 (size=127) 2024-11-10T15:52:34,699 WARN [M:0;c0771061be61:40063 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-10T15:52:34,699 INFO [M:0;c0771061be61:40063 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-10T15:52:34,699 INFO [M:0;c0771061be61:40063 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-10T15:52:34,699 DEBUG [M:0;c0771061be61:40063 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T15:52:34,699 INFO [M:0;c0771061be61:40063 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T15:52:34,699 DEBUG [M:0;c0771061be61:40063 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T15:52:34,699 DEBUG [M:0;c0771061be61:40063 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T15:52:34,699 DEBUG [M:0;c0771061be61:40063 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-10T15:52:34,699 INFO [M:0;c0771061be61:40063 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.85 KB heapSize=34.13 KB 2024-11-10T15:52:34,717 DEBUG [M:0;c0771061be61:40063 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/99e56a3a79b5456babe70e54e6afa266 is 82, key is hbase:meta,,1/info:regioninfo/1731253952516/Put/seqid=0 2024-11-10T15:52:34,720 WARN [M:0;c0771061be61:40063 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:52:34,720 WARN [M:0;c0771061be61:40063 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:52:34,723 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1448674904_22 at /127.0.0.1:53252 [Receiving block BP-2013058530-172.17.0.3-1731253944422:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:35019:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53252 dst: /127.0.0.1:35019 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:52:34,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_-9223372036854775568_1035 (size=5672) 2024-11-10T15:52:34,727 WARN [M:0;c0771061be61:40063 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-10T15:52:34,727 INFO [M:0;c0771061be61:40063 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/99e56a3a79b5456babe70e54e6afa266 2024-11-10T15:52:34,738 INFO [RS:0;c0771061be61:42259 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T15:52:34,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42259-0x1012572cba70001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:52:34,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42259-0x1012572cba70001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:52:34,738 INFO [RS:0;c0771061be61:42259 {}] regionserver.HRegionServer(1031): Exiting; stopping=c0771061be61,42259,1731253949931; zookeeper connection closed. 2024-11-10T15:52:34,738 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3a2e5436 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3a2e5436 2024-11-10T15:52:34,748 INFO [RS:2;c0771061be61:34189 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T15:52:34,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34189-0x1012572cba70003, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:52:34,748 INFO [RS:2;c0771061be61:34189 {}] regionserver.HRegionServer(1031): Exiting; stopping=c0771061be61,34189,1731253950088; zookeeper connection closed. 2024-11-10T15:52:34,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34189-0x1012572cba70003, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:52:34,749 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@15cf935 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@15cf935 2024-11-10T15:52:34,749 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-10T15:52:34,753 DEBUG [M:0;c0771061be61:40063 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3068743a2c0f4176a3b848a870c173d3 is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731253953673/Put/seqid=0 2024-11-10T15:52:34,755 WARN [M:0;c0771061be61:40063 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:52:34,755 WARN [M:0;c0771061be61:40063 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-10T15:52:34,758 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1448674904_22 at /127.0.0.1:53272 [Receiving block BP-2013058530-172.17.0.3-1731253944422:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:35019:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53272 dst: /127.0.0.1:35019 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:52:34,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_-9223372036854775552_1037 (size=6441) 2024-11-10T15:52:34,762 WARN [M:0;c0771061be61:40063 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-10T15:52:34,763 INFO [M:0;c0771061be61:40063 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.17 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3068743a2c0f4176a3b848a870c173d3 2024-11-10T15:52:34,788 DEBUG [M:0;c0771061be61:40063 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9f2fa98dd3cf41adb2e192b57d11319d is 69, key is c0771061be61,34189,1731253950088/rs:state/1731253951350/Put/seqid=0 2024-11-10T15:52:34,790 WARN [M:0;c0771061be61:40063 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T15:52:34,790 WARN [M:0;c0771061be61:40063 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-10T15:52:34,793 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1448674904_22 at /127.0.0.1:53280 [Receiving block BP-2013058530-172.17.0.3-1731253944422:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:35019:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53280 dst: /127.0.0.1:35019 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T15:52:34,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_-9223372036854775536_1039 (size=5294) 2024-11-10T15:52:34,797 WARN [M:0;c0771061be61:40063 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-10T15:52:34,797 INFO [M:0;c0771061be61:40063 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9f2fa98dd3cf41adb2e192b57d11319d 2024-11-10T15:52:34,805 DEBUG [M:0;c0771061be61:40063 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/99e56a3a79b5456babe70e54e6afa266 as hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/99e56a3a79b5456babe70e54e6afa266 2024-11-10T15:52:34,812 INFO [M:0;c0771061be61:40063 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/99e56a3a79b5456babe70e54e6afa266, entries=8, sequenceid=72, filesize=5.5 K 2024-11-10T15:52:34,813 DEBUG [M:0;c0771061be61:40063 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3068743a2c0f4176a3b848a870c173d3 as hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3068743a2c0f4176a3b848a870c173d3 2024-11-10T15:52:34,821 INFO [M:0;c0771061be61:40063 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3068743a2c0f4176a3b848a870c173d3, entries=8, sequenceid=72, filesize=6.3 K 2024-11-10T15:52:34,822 DEBUG [M:0;c0771061be61:40063 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9f2fa98dd3cf41adb2e192b57d11319d as hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9f2fa98dd3cf41adb2e192b57d11319d 2024-11-10T15:52:34,830 INFO [M:0;c0771061be61:40063 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9f2fa98dd3cf41adb2e192b57d11319d, entries=3, sequenceid=72, filesize=5.2 K 2024-11-10T15:52:34,832 INFO [M:0;c0771061be61:40063 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.85 KB/27492, heapSize ~33.84 KB/34648, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 133ms, sequenceid=72, compaction requested=false 2024-11-10T15:52:34,833 INFO [M:0;c0771061be61:40063 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-10T15:52:34,834 DEBUG [M:0;c0771061be61:40063 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731253954699Disabling compacts and flushes for region at 1731253954699Disabling writes for close at 1731253954699Obtaining lock to block concurrent updates at 1731253954699Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731253954699Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27492, getHeapSize=34888, getOffHeapSize=0, getCellsCount=85 at 1731253954700 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731253954701 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731253954701Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731253954717 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731253954717Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731253954735 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731253954752 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731253954752Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731253954770 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731253954787 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731253954787Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5e0ceeea: reopening flushed file at 1731253954804 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4cfca3b9: reopening flushed file at 1731253954812 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@30bbeefc: reopening flushed file at 1731253954821 (+9 ms)Finished flush of dataSize ~26.85 KB/27492, heapSize ~33.84 KB/34648, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 133ms, sequenceid=72, compaction requested=false at 1731253954832 (+11 ms)Writing region close event to WAL at 1731253954833 (+1 ms)Closed at 1731253954833 2024-11-10T15:52:34,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36193 is added to blk_1073741825_1011 (size=32695) 2024-11-10T15:52:34,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46535 is added to blk_1073741825_1011 (size=32695) 2024-11-10T15:52:34,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_1073741825_1011 (size=32695) 2024-11-10T15:52:34,838 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T15:52:34,838 INFO [M:0;c0771061be61:40063 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-10T15:52:34,839 INFO [M:0;c0771061be61:40063 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:40063 2024-11-10T15:52:34,839 INFO [M:0;c0771061be61:40063 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T15:52:34,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:52:34,948 INFO [M:0;c0771061be61:40063 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T15:52:34,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40063-0x1012572cba70000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:52:34,987 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3114ae69{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T15:52:34,990 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3c70a874{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T15:52:34,991 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T15:52:34,991 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5822645a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T15:52:34,991 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16cd567f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/hadoop.log.dir/,STOPPED} 2024-11-10T15:52:34,994 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T15:52:34,994 WARN [BP-2013058530-172.17.0.3-1731253944422 heartbeating to localhost/127.0.0.1:38825 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T15:52:34,994 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T15:52:34,994 WARN [BP-2013058530-172.17.0.3-1731253944422 heartbeating to localhost/127.0.0.1:38825 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2013058530-172.17.0.3-1731253944422 (Datanode Uuid 16d6c1da-7bdf-4cf0-89f9-b0b1c1e2d7b6) service to localhost/127.0.0.1:38825 2024-11-10T15:52:34,996 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/cluster_524e8f52-834f-1d4d-b88d-f07c412ed3e3/data/data5/current/BP-2013058530-172.17.0.3-1731253944422 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T15:52:34,996 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/cluster_524e8f52-834f-1d4d-b88d-f07c412ed3e3/data/data6/current/BP-2013058530-172.17.0.3-1731253944422 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T15:52:34,997 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T15:52:34,999 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@353955e9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T15:52:35,000 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11738cd8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T15:52:35,000 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T15:52:35,000 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40eb7053{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T15:52:35,000 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@510fec09{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/hadoop.log.dir/,STOPPED} 2024-11-10T15:52:35,002 WARN [BP-2013058530-172.17.0.3-1731253944422 heartbeating to localhost/127.0.0.1:38825 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T15:52:35,002 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T15:52:35,002 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T15:52:35,002 WARN [BP-2013058530-172.17.0.3-1731253944422 heartbeating to localhost/127.0.0.1:38825 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2013058530-172.17.0.3-1731253944422 (Datanode Uuid aa8cd665-04a1-43eb-becc-07fb607431fd) service to localhost/127.0.0.1:38825 2024-11-10T15:52:35,003 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/cluster_524e8f52-834f-1d4d-b88d-f07c412ed3e3/data/data3/current/BP-2013058530-172.17.0.3-1731253944422 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T15:52:35,004 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/cluster_524e8f52-834f-1d4d-b88d-f07c412ed3e3/data/data4/current/BP-2013058530-172.17.0.3-1731253944422 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T15:52:35,004 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T15:52:35,008 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1b97a472{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T15:52:35,008 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3722a29b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T15:52:35,009 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T15:52:35,009 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69893329{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T15:52:35,009 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a5de9e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/hadoop.log.dir/,STOPPED} 2024-11-10T15:52:35,010 WARN [BP-2013058530-172.17.0.3-1731253944422 heartbeating to localhost/127.0.0.1:38825 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T15:52:35,010 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T15:52:35,010 WARN [BP-2013058530-172.17.0.3-1731253944422 heartbeating to localhost/127.0.0.1:38825 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2013058530-172.17.0.3-1731253944422 (Datanode Uuid 3feff3af-413c-469c-878a-c8503eb3b8e6) service to localhost/127.0.0.1:38825 2024-11-10T15:52:35,010 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T15:52:35,011 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/cluster_524e8f52-834f-1d4d-b88d-f07c412ed3e3/data/data1/current/BP-2013058530-172.17.0.3-1731253944422 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T15:52:35,011 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/cluster_524e8f52-834f-1d4d-b88d-f07c412ed3e3/data/data2/current/BP-2013058530-172.17.0.3-1731253944422 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T15:52:35,011 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T15:52:35,019 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62d6efd9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T15:52:35,019 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@353d35a1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T15:52:35,019 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T15:52:35,019 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ce709a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T15:52:35,020 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@760c69c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/hadoop.log.dir/,STOPPED} 2024-11-10T15:52:35,027 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-10T15:52:35,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-10T15:52:35,060 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=91 (was 160), OpenFileDescriptor=445 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=102 (was 67) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=7219 (was 7507) 2024-11-10T15:52:35,065 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=91, OpenFileDescriptor=445, MaxFileDescriptor=1048576, SystemLoadAverage=102, ProcessCount=11, AvailableMemoryMB=7220 2024-11-10T15:52:35,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-10T15:52:35,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/hadoop.log.dir so I do NOT create it in target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55 2024-11-10T15:52:35,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9518714-706e-8898-c7a5-643a674f268e/hadoop.tmp.dir so I do NOT create it in target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55 2024-11-10T15:52:35,066 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/cluster_adee98e5-b000-d252-9734-75e0ccdd3b5f, deleteOnExit=true 2024-11-10T15:52:35,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-10T15:52:35,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/test.cache.data in system properties and HBase conf 2024-11-10T15:52:35,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/hadoop.tmp.dir in system properties and HBase conf 2024-11-10T15:52:35,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/hadoop.log.dir in system properties and HBase conf 2024-11-10T15:52:35,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-10T15:52:35,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-10T15:52:35,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-10T15:52:35,067 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-10T15:52:35,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-10T15:52:35,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-10T15:52:35,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-10T15:52:35,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T15:52:35,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-10T15:52:35,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-10T15:52:35,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T15:52:35,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T15:52:35,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-10T15:52:35,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/nfs.dump.dir in system properties and HBase conf 2024-11-10T15:52:35,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/java.io.tmpdir in system properties and HBase conf 2024-11-10T15:52:35,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T15:52:35,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-10T15:52:35,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-10T15:52:35,409 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T15:52:35,415 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T15:52:35,416 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T15:52:35,416 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T15:52:35,416 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T15:52:35,417 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T15:52:35,417 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1aa34083{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/hadoop.log.dir/,AVAILABLE} 2024-11-10T15:52:35,418 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59bbe271{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T15:52:35,512 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@563c957f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/java.io.tmpdir/jetty-localhost-45417-hadoop-hdfs-3_4_1-tests_jar-_-any-5062426565717066808/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T15:52:35,513 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@cbd9f23{HTTP/1.1, (http/1.1)}{localhost:45417} 2024-11-10T15:52:35,513 INFO [Time-limited test {}] server.Server(415): Started @13098ms 2024-11-10T15:52:35,773 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T15:52:35,776 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T15:52:35,777 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T15:52:35,777 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T15:52:35,777 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T15:52:35,778 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6decf963{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/hadoop.log.dir/,AVAILABLE} 2024-11-10T15:52:35,778 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46f2e60d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T15:52:35,870 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@765c7210{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/java.io.tmpdir/jetty-localhost-33955-hadoop-hdfs-3_4_1-tests_jar-_-any-6999336781591563027/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T15:52:35,871 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3e498d5c{HTTP/1.1, (http/1.1)}{localhost:33955} 2024-11-10T15:52:35,871 INFO [Time-limited test {}] server.Server(415): Started @13456ms 2024-11-10T15:52:35,872 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T15:52:35,900 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T15:52:35,903 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T15:52:35,904 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T15:52:35,904 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T15:52:35,904 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T15:52:35,904 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7febc9c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/hadoop.log.dir/,AVAILABLE} 2024-11-10T15:52:35,905 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f5c60f4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T15:52:36,001 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4be50faa{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/java.io.tmpdir/jetty-localhost-44927-hadoop-hdfs-3_4_1-tests_jar-_-any-454497799691069345/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T15:52:36,001 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6625a4f2{HTTP/1.1, (http/1.1)}{localhost:44927} 2024-11-10T15:52:36,001 INFO [Time-limited test {}] server.Server(415): Started @13587ms 2024-11-10T15:52:36,004 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T15:52:36,039 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T15:52:36,043 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T15:52:36,045 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T15:52:36,045 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T15:52:36,045 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T15:52:36,046 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39179133{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/hadoop.log.dir/,AVAILABLE} 2024-11-10T15:52:36,046 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60d8940e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T15:52:36,140 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@733029a3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/java.io.tmpdir/jetty-localhost-38951-hadoop-hdfs-3_4_1-tests_jar-_-any-4757248300364959956/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T15:52:36,141 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a87cd5{HTTP/1.1, (http/1.1)}{localhost:38951} 2024-11-10T15:52:36,141 INFO [Time-limited test {}] server.Server(415): Started @13726ms 2024-11-10T15:52:36,142 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T15:52:37,333 WARN [Thread-564 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/cluster_adee98e5-b000-d252-9734-75e0ccdd3b5f/data/data1/current/BP-1914055564-172.17.0.3-1731253955093/current, will proceed with Du for space computation calculation, 2024-11-10T15:52:37,333 WARN [Thread-565 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/cluster_adee98e5-b000-d252-9734-75e0ccdd3b5f/data/data2/current/BP-1914055564-172.17.0.3-1731253955093/current, will proceed with Du for space computation calculation, 2024-11-10T15:52:37,353 WARN [Thread-504 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-10T15:52:37,356 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6ae28c4741a37b11 with lease ID 0x209efef24ca7a309: Processing first storage report for DS-91c25543-5668-4c4b-812e-dc1f9c9e7acf from datanode DatanodeRegistration(127.0.0.1:42219, datanodeUuid=01a9fb75-a036-4f00-beb5-d6b9a96e77ef, infoPort=46737, infoSecurePort=0, ipcPort=44465, storageInfo=lv=-57;cid=testClusterID;nsid=486920654;c=1731253955093) 2024-11-10T15:52:37,356 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6ae28c4741a37b11 with lease ID 0x209efef24ca7a309: from storage DS-91c25543-5668-4c4b-812e-dc1f9c9e7acf node DatanodeRegistration(127.0.0.1:42219, datanodeUuid=01a9fb75-a036-4f00-beb5-d6b9a96e77ef, infoPort=46737, infoSecurePort=0, ipcPort=44465, storageInfo=lv=-57;cid=testClusterID;nsid=486920654;c=1731253955093), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T15:52:37,356 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6ae28c4741a37b11 with lease ID 0x209efef24ca7a309: Processing first storage report for DS-f5175982-7304-4ec2-98d0-c93f0165b0e5 from datanode DatanodeRegistration(127.0.0.1:42219, datanodeUuid=01a9fb75-a036-4f00-beb5-d6b9a96e77ef, infoPort=46737, infoSecurePort=0, ipcPort=44465, storageInfo=lv=-57;cid=testClusterID;nsid=486920654;c=1731253955093) 2024-11-10T15:52:37,356 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6ae28c4741a37b11 with lease ID 0x209efef24ca7a309: from storage DS-f5175982-7304-4ec2-98d0-c93f0165b0e5 node DatanodeRegistration(127.0.0.1:42219, datanodeUuid=01a9fb75-a036-4f00-beb5-d6b9a96e77ef, infoPort=46737, infoSecurePort=0, ipcPort=44465, storageInfo=lv=-57;cid=testClusterID;nsid=486920654;c=1731253955093), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-10T15:52:37,450 WARN [Thread-575 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/cluster_adee98e5-b000-d252-9734-75e0ccdd3b5f/data/data3/current/BP-1914055564-172.17.0.3-1731253955093/current, will proceed with Du for space computation calculation, 2024-11-10T15:52:37,450 WARN [Thread-576 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/cluster_adee98e5-b000-d252-9734-75e0ccdd3b5f/data/data4/current/BP-1914055564-172.17.0.3-1731253955093/current, will proceed with Du for space computation calculation, 2024-11-10T15:52:37,469 WARN [Thread-527 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-10T15:52:37,472 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x228e7e75ea00b2ee with lease ID 0x209efef24ca7a30a: Processing first storage report for DS-d503c32f-1872-4f7f-a802-68ab1dcea50f from datanode DatanodeRegistration(127.0.0.1:42771, datanodeUuid=1f7cedca-187b-4c63-b483-fad773c07f46, infoPort=33487, infoSecurePort=0, ipcPort=38515, storageInfo=lv=-57;cid=testClusterID;nsid=486920654;c=1731253955093) 2024-11-10T15:52:37,472 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x228e7e75ea00b2ee with lease ID 0x209efef24ca7a30a: from storage DS-d503c32f-1872-4f7f-a802-68ab1dcea50f node DatanodeRegistration(127.0.0.1:42771, datanodeUuid=1f7cedca-187b-4c63-b483-fad773c07f46, infoPort=33487, infoSecurePort=0, ipcPort=38515, storageInfo=lv=-57;cid=testClusterID;nsid=486920654;c=1731253955093), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T15:52:37,472 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x228e7e75ea00b2ee with lease ID 0x209efef24ca7a30a: Processing first storage report for DS-b608b50c-c4fa-40f8-8037-bf08e043771f from datanode DatanodeRegistration(127.0.0.1:42771, datanodeUuid=1f7cedca-187b-4c63-b483-fad773c07f46, infoPort=33487, infoSecurePort=0, ipcPort=38515, storageInfo=lv=-57;cid=testClusterID;nsid=486920654;c=1731253955093) 2024-11-10T15:52:37,472 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x228e7e75ea00b2ee with lease ID 0x209efef24ca7a30a: from storage DS-b608b50c-c4fa-40f8-8037-bf08e043771f node DatanodeRegistration(127.0.0.1:42771, datanodeUuid=1f7cedca-187b-4c63-b483-fad773c07f46, infoPort=33487, infoSecurePort=0, ipcPort=38515, storageInfo=lv=-57;cid=testClusterID;nsid=486920654;c=1731253955093), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T15:52:37,508 WARN [Thread-587 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/cluster_adee98e5-b000-d252-9734-75e0ccdd3b5f/data/data6/current/BP-1914055564-172.17.0.3-1731253955093/current, will proceed with Du for space computation calculation, 2024-11-10T15:52:37,508 WARN [Thread-586 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/cluster_adee98e5-b000-d252-9734-75e0ccdd3b5f/data/data5/current/BP-1914055564-172.17.0.3-1731253955093/current, will proceed with Du for space computation calculation, 2024-11-10T15:52:37,533 WARN [Thread-549 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-10T15:52:37,536 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x652640e530ba9796 with lease ID 0x209efef24ca7a30b: Processing first storage report for DS-757f4066-0ed1-4b66-b85c-654e7a97a0f5 from datanode DatanodeRegistration(127.0.0.1:33621, datanodeUuid=51ac6d60-b8a8-47c0-bcd0-ee3171ffdb23, infoPort=33607, infoSecurePort=0, ipcPort=32921, storageInfo=lv=-57;cid=testClusterID;nsid=486920654;c=1731253955093) 2024-11-10T15:52:37,536 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x652640e530ba9796 with lease ID 0x209efef24ca7a30b: from storage DS-757f4066-0ed1-4b66-b85c-654e7a97a0f5 node DatanodeRegistration(127.0.0.1:33621, datanodeUuid=51ac6d60-b8a8-47c0-bcd0-ee3171ffdb23, infoPort=33607, infoSecurePort=0, ipcPort=32921, storageInfo=lv=-57;cid=testClusterID;nsid=486920654;c=1731253955093), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T15:52:37,536 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x652640e530ba9796 with lease ID 0x209efef24ca7a30b: Processing first storage report for DS-8d723f6c-1ef3-493a-a91c-793162e39dcb from datanode DatanodeRegistration(127.0.0.1:33621, datanodeUuid=51ac6d60-b8a8-47c0-bcd0-ee3171ffdb23, infoPort=33607, infoSecurePort=0, ipcPort=32921, storageInfo=lv=-57;cid=testClusterID;nsid=486920654;c=1731253955093) 2024-11-10T15:52:37,536 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x652640e530ba9796 with lease ID 0x209efef24ca7a30b: from storage DS-8d723f6c-1ef3-493a-a91c-793162e39dcb node DatanodeRegistration(127.0.0.1:33621, datanodeUuid=51ac6d60-b8a8-47c0-bcd0-ee3171ffdb23, infoPort=33607, infoSecurePort=0, ipcPort=32921, storageInfo=lv=-57;cid=testClusterID;nsid=486920654;c=1731253955093), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T15:52:37,585 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55 2024-11-10T15:52:37,589 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/cluster_adee98e5-b000-d252-9734-75e0ccdd3b5f/zookeeper_0, clientPort=60328, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/cluster_adee98e5-b000-d252-9734-75e0ccdd3b5f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/cluster_adee98e5-b000-d252-9734-75e0ccdd3b5f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-10T15:52:37,590 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60328 2024-11-10T15:52:37,590 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:52:37,592 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:52:37,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42219 is added to blk_1073741825_1001 (size=7) 2024-11-10T15:52:37,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741825_1001 (size=7) 2024-11-10T15:52:37,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42771 is added to blk_1073741825_1001 (size=7) 2024-11-10T15:52:37,608 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234 with version=8 2024-11-10T15:52:37,608 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38825/user/jenkins/test-data/27bfd227-f167-a299-2b9b-a730d9dc5017/hbase-staging 2024-11-10T15:52:37,610 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c0771061be61:0 server-side Connection retries=45 2024-11-10T15:52:37,611 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:52:37,611 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T15:52:37,611 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T15:52:37,611 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:52:37,611 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T15:52:37,611 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-10T15:52:37,611 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T15:52:37,612 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38919 2024-11-10T15:52:37,614 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38919 connecting to ZooKeeper ensemble=127.0.0.1:60328 2024-11-10T15:52:37,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:389190x0, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T15:52:37,674 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38919-0x1012572ef800000 connected 2024-11-10T15:52:37,776 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block 
reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:52:37,780 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:52:37,784 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:52:37,784 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234, hbase.cluster.distributed=false 2024-11-10T15:52:37,786 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T15:52:37,787 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38919 2024-11-10T15:52:37,787 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38919 2024-11-10T15:52:37,787 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38919 2024-11-10T15:52:37,789 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38919 2024-11-10T15:52:37,789 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38919 2024-11-10T15:52:37,804 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c0771061be61:0 server-side Connection retries=45 2024-11-10T15:52:37,804 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:52:37,804 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T15:52:37,804 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T15:52:37,805 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:52:37,805 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T15:52:37,805 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T15:52:37,805 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T15:52:37,805 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:37143 2024-11-10T15:52:37,807 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37143 connecting to ZooKeeper ensemble=127.0.0.1:60328 2024-11-10T15:52:37,808 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:52:37,809 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:52:37,822 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:371430x0, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T15:52:37,823 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37143-0x1012572ef800001 connected 2024-11-10T15:52:37,823 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37143-0x1012572ef800001, quorum=127.0.0.1:60328, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:52:37,823 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T15:52:37,824 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T15:52:37,825 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37143-0x1012572ef800001, quorum=127.0.0.1:60328, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T15:52:37,826 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37143-0x1012572ef800001, quorum=127.0.0.1:60328, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T15:52:37,827 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37143 2024-11-10T15:52:37,827 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37143 2024-11-10T15:52:37,828 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37143 2024-11-10T15:52:37,828 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37143 2024-11-10T15:52:37,828 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37143 2024-11-10T15:52:37,846 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c0771061be61:0 server-side Connection retries=45 2024-11-10T15:52:37,846 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:52:37,846 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T15:52:37,846 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T15:52:37,846 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:52:37,846 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T15:52:37,846 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T15:52:37,846 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T15:52:37,847 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:36515 2024-11-10T15:52:37,849 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36515 connecting to ZooKeeper ensemble=127.0.0.1:60328 2024-11-10T15:52:37,849 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:52:37,851 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:52:37,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:365150x0, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T15:52:37,865 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36515-0x1012572ef800002 connected 2024-11-10T15:52:37,865 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36515-0x1012572ef800002, quorum=127.0.0.1:60328, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:52:37,865 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T15:52:37,866 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T15:52:37,866 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36515-0x1012572ef800002, quorum=127.0.0.1:60328, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T15:52:37,867 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36515-0x1012572ef800002, quorum=127.0.0.1:60328, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T15:52:37,868 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36515 2024-11-10T15:52:37,868 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36515 2024-11-10T15:52:37,868 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36515 2024-11-10T15:52:37,869 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36515 2024-11-10T15:52:37,869 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36515 2024-11-10T15:52:37,887 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c0771061be61:0 server-side Connection retries=45 2024-11-10T15:52:37,887 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:52:37,888 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T15:52:37,888 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T15:52:37,888 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T15:52:37,888 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T15:52:37,888 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T15:52:37,888 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T15:52:37,888 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42457 2024-11-10T15:52:37,890 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42457 connecting to ZooKeeper ensemble=127.0.0.1:60328 2024-11-10T15:52:37,890 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:52:37,892 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:52:37,945 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-10T15:52:37,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:424570x0, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T15:52:37,966 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:424570x0, quorum=127.0.0.1:60328, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:52:37,966 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42457-0x1012572ef800003 connected 2024-11-10T15:52:37,966 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T15:52:37,967 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T15:52:37,968 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): 
regionserver:42457-0x1012572ef800003, quorum=127.0.0.1:60328, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T15:52:37,969 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42457-0x1012572ef800003, quorum=127.0.0.1:60328, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T15:52:37,971 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42457 2024-11-10T15:52:37,971 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42457 2024-11-10T15:52:37,971 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42457 2024-11-10T15:52:37,971 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42457 2024-11-10T15:52:37,972 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42457 2024-11-10T15:52:37,982 DEBUG [M:0;c0771061be61:38919 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c0771061be61:38919 2024-11-10T15:52:37,984 INFO [master/c0771061be61:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c0771061be61,38919,1731253957610 2024-11-10T15:52:38,006 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T15:52:38,007 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T15:52:38,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:52:38,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36515-0x1012572ef800002, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:52:38,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37143-0x1012572ef800001, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:52:38,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42457-0x1012572ef800003, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:52:38,032 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c0771061be61,38919,1731253957610 2024-11-10T15:52:38,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:38,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36515-0x1012572ef800002, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T15:52:38,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42457-0x1012572ef800003, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T15:52:38,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37143-0x1012572ef800001, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T15:52:38,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36515-0x1012572ef800002, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:38,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42457-0x1012572ef800003, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:38,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37143-0x1012572ef800001, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:38,059 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-10T15:52:38,060 INFO [master/c0771061be61:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for 
/hbase/backup-masters/c0771061be61,38919,1731253957610 from backup master directory 2024-11-10T15:52:38,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c0771061be61,38919,1731253957610 2024-11-10T15:52:38,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37143-0x1012572ef800001, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:52:38,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36515-0x1012572ef800002, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:52:38,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42457-0x1012572ef800003, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:52:38,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T15:52:38,069 WARN [master/c0771061be61:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-10T15:52:38,069 INFO [master/c0771061be61:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c0771061be61,38919,1731253957610 2024-11-10T15:52:38,077 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/hbase.id] with ID: 68d7b67c-c582-45c4-94cb-cc5ae8d2c21e 2024-11-10T15:52:38,077 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/.tmp/hbase.id 2024-11-10T15:52:38,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42771 is added to blk_1073741826_1002 (size=42) 2024-11-10T15:52:38,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741826_1002 (size=42) 2024-11-10T15:52:38,088 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/.tmp/hbase.id]:[hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/hbase.id] 2024-11-10T15:52:38,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42219 is added to blk_1073741826_1002 (size=42) 2024-11-10T15:52:38,107 INFO [master/c0771061be61:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T15:52:38,107 INFO [master/c0771061be61:0:becomeActiveMaster {}] util.FSTableDescriptors(270): 
Fetching table descriptors from the filesystem. 2024-11-10T15:52:38,109 INFO [master/c0771061be61:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 2024-11-10T15:52:38,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42457-0x1012572ef800003, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:38,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:38,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37143-0x1012572ef800001, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:38,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36515-0x1012572ef800002, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:38,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741827_1003 (size=196) 2024-11-10T15:52:38,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42219 is added to blk_1073741827_1003 (size=196) 2024-11-10T15:52:38,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42771 is added to blk_1073741827_1003 (size=196) 2024-11-10T15:52:38,135 INFO [master/c0771061be61:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T15:52:38,136 INFO [master/c0771061be61:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-10T15:52:38,136 INFO [master/c0771061be61:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating 
WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T15:52:38,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42219 is added to blk_1073741828_1004 (size=1189) 2024-11-10T15:52:38,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42771 is added to blk_1073741828_1004 (size=1189) 2024-11-10T15:52:38,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741828_1004 (size=1189) 2024-11-10T15:52:38,150 INFO [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/MasterData/data/master/store 2024-11-10T15:52:38,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42771 is added to blk_1073741829_1005 (size=34) 2024-11-10T15:52:38,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42219 is added to blk_1073741829_1005 (size=34) 2024-11-10T15:52:38,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741829_1005 (size=34) 2024-11-10T15:52:38,160 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T15:52:38,160 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T15:52:38,160 INFO [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-10T15:52:38,160 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T15:52:38,160 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T15:52:38,160 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T15:52:38,160 INFO [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T15:52:38,160 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731253958160Disabling compacts and flushes for region at 1731253958160Disabling writes for close at 1731253958160Writing region close event to WAL at 1731253958160Closed at 1731253958160 2024-11-10T15:52:38,161 WARN [master/c0771061be61:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/MasterData/data/master/store/.initializing 2024-11-10T15:52:38,161 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/MasterData/WALs/c0771061be61,38919,1731253957610 2024-11-10T15:52:38,165 INFO [master/c0771061be61:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0771061be61%2C38919%2C1731253957610, suffix=, logDir=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/MasterData/WALs/c0771061be61,38919,1731253957610, archiveDir=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/MasterData/oldWALs, maxLogs=10 2024-11-10T15:52:38,165 INFO [master/c0771061be61:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0771061be61%2C38919%2C1731253957610.1731253958165 2024-11-10T15:52:38,175 INFO [master/c0771061be61:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/MasterData/WALs/c0771061be61,38919,1731253957610/c0771061be61%2C38919%2C1731253957610.1731253958165 2024-11-10T15:52:38,176 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33607:33607),(127.0.0.1/127.0.0.1:33487:33487),(127.0.0.1/127.0.0.1:46737:46737)] 2024-11-10T15:52:38,177 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-10T15:52:38,177 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T15:52:38,177 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:52:38,177 DEBUG 
[master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:52:38,179 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:52:38,181 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-10T15:52:38,181 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:52:38,182 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:52:38,182 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:52:38,184 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-10T15:52:38,185 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:52:38,185 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 
2024-11-10T15:52:38,185 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:52:38,188 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-10T15:52:38,189 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:52:38,189 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T15:52:38,190 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:52:38,191 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-10T15:52:38,191 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:52:38,192 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T15:52:38,192 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 
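
The CompactionConfiguration lines above print the store-level compaction settings in effect for each column family (minCompactSize 128 MB, 3-10 files per compaction, ratio 1.2, off-peak ratio 5.0). As a rough illustration only, not taken from this test, the same values could be set explicitly through the standard hbase.hstore.compaction.* configuration keys; the class name and the hard-coded values below are assumptions that simply mirror what is logged above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Values below mirror the CompactionConfiguration output in the log above.
            conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize: 128 MB
            conf.setInt("hbase.hstore.compaction.min", 3);                         // minFilesToCompact
            conf.setInt("hbase.hstore.compaction.max", 10);                        // maxFilesToCompact
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                  // compaction ratio
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);          // off-peak ratio
            System.out.println("compaction ratio = " + conf.getFloat("hbase.hstore.compaction.ratio", -1f));
        }
    }
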
2024-11-10T15:52:38,193 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:52:38,193 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:52:38,195 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:52:38,195 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:52:38,195 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-10T15:52:38,196 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T15:52:38,199 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T15:52:38,200 INFO [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72461825, jitterRate=0.07976533472537994}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T15:52:38,201 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731253958178Initializing all the Stores at 1731253958179 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731253958179Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731253958179Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731253958179Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731253958179Cleaning up temporary data from old regions at 1731253958195 (+16 ms)Region opened successfully at 1731253958201 (+6 ms) 2024-11-10T15:52:38,201 INFO [master/c0771061be61:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-10T15:52:38,205 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4293cc81, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c0771061be61/172.17.0.3:0 2024-11-10T15:52:38,206 INFO [master/c0771061be61:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-10T15:52:38,206 INFO [master/c0771061be61:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-10T15:52:38,206 INFO [master/c0771061be61:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-10T15:52:38,206 INFO [master/c0771061be61:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-10T15:52:38,207 INFO [master/c0771061be61:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-10T15:52:38,207 INFO [master/c0771061be61:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-10T15:52:38,208 INFO [master/c0771061be61:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-10T15:52:38,210 INFO [master/c0771061be61:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-11-10T15:52:38,211 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-10T15:52:38,222 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-10T15:52:38,222 INFO [master/c0771061be61:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-10T15:52:38,223 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-10T15:52:38,232 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-10T15:52:38,233 INFO [master/c0771061be61:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-10T15:52:38,234 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-10T15:52:38,243 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-10T15:52:38,244 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-10T15:52:38,253 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-10T15:52:38,256 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-10T15:52:38,269 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-10T15:52:38,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36515-0x1012572ef800002, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T15:52:38,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T15:52:38,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42457-0x1012572ef800003, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T15:52:38,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37143-0x1012572ef800001, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-11-10T15:52:38,280 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:38,280 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36515-0x1012572ef800002, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:38,280 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37143-0x1012572ef800001, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:38,280 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42457-0x1012572ef800003, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:38,280 INFO [master/c0771061be61:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c0771061be61,38919,1731253957610, sessionid=0x1012572ef800000, setting cluster-up flag (Was=false) 2024-11-10T15:52:38,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37143-0x1012572ef800001, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:38,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36515-0x1012572ef800002, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:38,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:38,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42457-0x1012572ef800003, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:38,332 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-10T15:52:38,333 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c0771061be61,38919,1731253957610 2024-11-10T15:52:38,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37143-0x1012572ef800001, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:38,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:38,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36515-0x1012572ef800002, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:38,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:42457-0x1012572ef800003, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:38,384 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-10T15:52:38,386 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c0771061be61,38919,1731253957610 2024-11-10T15:52:38,387 INFO [master/c0771061be61:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-10T15:52:38,390 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-10T15:52:38,390 INFO [master/c0771061be61:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-10T15:52:38,390 INFO [master/c0771061be61:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
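
The ZKWatcher and ZKUtil lines above are the master and region servers reacting to watch notifications (NodeCreated, NodeDeleted, NodeChildrenChanged) on znodes under /hbase. As a minimal sketch of the underlying ZooKeeper watch mechanism, assuming a locally reachable quorum at 127.0.0.1:2181 and hypothetical paths (the test itself runs its own quorum on 127.0.0.1:60328), one-shot watches can be registered and observed like this:

    import java.util.List;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;

    public class WatchSketch {
        public static void main(String[] args) throws Exception {
            // The default watcher receives events in the same shape ZKWatcher logs above.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, (WatchedEvent event) ->
                System.out.println("Received ZooKeeper Event, type=" + event.getType()
                    + ", state=" + event.getState() + ", path=" + event.getPath()));

            // One-shot existence watch: fires NodeCreated/NodeDeleted for this path.
            zk.exists("/hbase/master", true);
            // One-shot child watch on a parent znode: fires NodeChildrenChanged.
            List<String> children = zk.getChildren("/", true);
            System.out.println("children of /: " + children);

            Thread.sleep(60_000); // keep the session open long enough to observe events
            zk.close();
        }
    }

Plain ZooKeeper watches fire once and must be re-registered, which is why the same paths keep reappearing in the watcher lines above as HBase re-sets them after each notification.
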
2024-11-10T15:52:38,390 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c0771061be61,38919,1731253957610 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-10T15:52:38,392 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c0771061be61:0, corePoolSize=5, maxPoolSize=5 2024-11-10T15:52:38,392 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c0771061be61:0, corePoolSize=5, maxPoolSize=5 2024-11-10T15:52:38,392 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c0771061be61:0, corePoolSize=5, maxPoolSize=5 2024-11-10T15:52:38,392 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c0771061be61:0, corePoolSize=5, maxPoolSize=5 2024-11-10T15:52:38,392 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c0771061be61:0, corePoolSize=10, maxPoolSize=10 2024-11-10T15:52:38,392 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,392 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c0771061be61:0, corePoolSize=2, maxPoolSize=2 2024-11-10T15:52:38,392 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,394 INFO [master/c0771061be61:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731253988394 2024-11-10T15:52:38,395 INFO [master/c0771061be61:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-10T15:52:38,395 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T15:52:38,395 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-10T15:52:38,395 INFO [master/c0771061be61:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-10T15:52:38,395 INFO [master/c0771061be61:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-10T15:52:38,395 INFO [master/c0771061be61:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-10T15:52:38,395 INFO [master/c0771061be61:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-10T15:52:38,395 INFO [master/c0771061be61:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-10T15:52:38,395 INFO [master/c0771061be61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:38,396 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:52:38,396 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-10T15:52:38,400 INFO [master/c0771061be61:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-10T15:52:38,401 INFO [master/c0771061be61:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-10T15:52:38,401 INFO [master/c0771061be61:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-10T15:52:38,401 INFO [master/c0771061be61:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-10T15:52:38,401 INFO [master/c0771061be61:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-10T15:52:38,401 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c0771061be61:0:becomeActiveMaster-HFileCleaner.large.0-1731253958401,5,FailOnTimeoutGroup] 2024-11-10T15:52:38,402 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/c0771061be61:0:becomeActiveMaster-HFileCleaner.small.0-1731253958401,5,FailOnTimeoutGroup] 2024-11-10T15:52:38,402 INFO [master/c0771061be61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:38,402 INFO [master/c0771061be61:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-10T15:52:38,402 INFO [master/c0771061be61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:38,402 INFO [master/c0771061be61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:38,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42771 is added to blk_1073741831_1007 (size=1321) 2024-11-10T15:52:38,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42219 is added to blk_1073741831_1007 (size=1321) 2024-11-10T15:52:38,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741831_1007 (size=1321) 2024-11-10T15:52:38,413 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-10T15:52:38,413 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234 2024-11-10T15:52:38,425 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42771 is added to blk_1073741832_1008 (size=32) 2024-11-10T15:52:38,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741832_1008 (size=32) 2024-11-10T15:52:38,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42219 is added to blk_1073741832_1008 (size=32) 2024-11-10T15:52:38,427 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T15:52:38,428 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T15:52:38,430 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T15:52:38,430 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:52:38,431 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:52:38,431 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T15:52:38,433 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T15:52:38,433 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-10T15:52:38,434 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:52:38,434 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T15:52:38,436 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T15:52:38,436 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:52:38,437 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:52:38,437 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T15:52:38,439 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T15:52:38,439 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:52:38,439 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:52:38,440 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T15:52:38,440 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/hbase/meta/1588230740 2024-11-10T15:52:38,441 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/hbase/meta/1588230740 2024-11-10T15:52:38,443 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T15:52:38,443 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T15:52:38,443 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-10T15:52:38,445 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T15:52:38,447 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T15:52:38,448 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63570756, jitterRate=-0.05272191762924194}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T15:52:38,449 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731253958427Initializing all the Stores at 1731253958428 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731253958428Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731253958428Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731253958428Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731253958428Cleaning up temporary data from old regions at 1731253958443 (+15 ms)Region opened successfully at 1731253958449 (+6 ms) 2024-11-10T15:52:38,449 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 
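
The table descriptors logged above for master:store and hbase:meta spell out per-family attributes such as VERSIONS, DATA_BLOCK_ENCODING, BLOOMFILTER, IN_MEMORY and BLOCKSIZE. For orientation, here is a hypothetical client-side sketch that builds a descriptor with the same 'info'-style attributes through the public HBase client API; the table name "demo" and the createTable call are illustrative and not part of this test run.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
        public static void main(String[] args) throws Exception {
            // Attributes mirror the 'info' family printed above:
            // VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks.
            TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .setBloomFilterType(BloomType.ROWCOL)
                    .setInMemory(true)
                    .setBlocksize(8 * 1024)
                    .build())
                .build();

            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                admin.createTable(td);
            }
        }
    }
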
2024-11-10T15:52:38,449 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T15:52:38,449 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T15:52:38,449 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T15:52:38,449 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T15:52:38,450 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T15:52:38,450 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731253958449Disabling compacts and flushes for region at 1731253958449Disabling writes for close at 1731253958449Writing region close event to WAL at 1731253958450 (+1 ms)Closed at 1731253958450 2024-11-10T15:52:38,451 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T15:52:38,451 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-10T15:52:38,451 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-10T15:52:38,453 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T15:52:38,455 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-10T15:52:38,474 INFO [RS:0;c0771061be61:37143 {}] regionserver.HRegionServer(746): ClusterId : 68d7b67c-c582-45c4-94cb-cc5ae8d2c21e 2024-11-10T15:52:38,474 INFO [RS:2;c0771061be61:42457 {}] regionserver.HRegionServer(746): ClusterId : 68d7b67c-c582-45c4-94cb-cc5ae8d2c21e 2024-11-10T15:52:38,474 DEBUG [RS:0;c0771061be61:37143 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T15:52:38,474 DEBUG [RS:2;c0771061be61:42457 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T15:52:38,475 INFO [RS:1;c0771061be61:36515 {}] regionserver.HRegionServer(746): ClusterId : 68d7b67c-c582-45c4-94cb-cc5ae8d2c21e 2024-11-10T15:52:38,475 DEBUG [RS:1;c0771061be61:36515 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T15:52:38,586 DEBUG [RS:2;c0771061be61:42457 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T15:52:38,586 DEBUG [RS:0;c0771061be61:37143 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T15:52:38,587 DEBUG [RS:2;c0771061be61:42457 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T15:52:38,587 DEBUG [RS:0;c0771061be61:37143 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 
2024-11-10T15:52:38,588 DEBUG [RS:1;c0771061be61:36515 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T15:52:38,588 DEBUG [RS:1;c0771061be61:36515 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T15:52:38,605 WARN [c0771061be61:38919 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-10T15:52:38,606 DEBUG [RS:2;c0771061be61:42457 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T15:52:38,607 DEBUG [RS:0;c0771061be61:37143 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T15:52:38,607 DEBUG [RS:1;c0771061be61:36515 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T15:52:38,607 DEBUG [RS:2;c0771061be61:42457 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39bd3223, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c0771061be61/172.17.0.3:0 2024-11-10T15:52:38,607 DEBUG [RS:0;c0771061be61:37143 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@539a2775, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c0771061be61/172.17.0.3:0 2024-11-10T15:52:38,607 DEBUG [RS:1;c0771061be61:36515 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ea71f78, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c0771061be61/172.17.0.3:0 2024-11-10T15:52:38,621 DEBUG [RS:0;c0771061be61:37143 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c0771061be61:37143 2024-11-10T15:52:38,621 INFO [RS:0;c0771061be61:37143 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T15:52:38,621 INFO [RS:0;c0771061be61:37143 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T15:52:38,621 DEBUG [RS:0;c0771061be61:37143 {}] regionserver.HRegionServer(832): About to register with Master. 
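The AbstractRpcClient entries above list the transport settings each region server uses toward the master: tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000 and readTO=20000. Below is a minimal sketch of applying those same settings to a plain java.net.Socket; the target address reuses the master port from this log purely as a placeholder, and this is not HBase's RPC stack (writeTO and the codec settings have no direct Socket equivalent and are omitted).

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.Socket;

    public class RpcSocketSettingsSketch {
        public static void main(String[] args) throws IOException {
            Socket s = new Socket();
            s.setKeepAlive(true);        // tcpKeepAlive=true
            s.setTcpNoDelay(true);       // tcpNoDelay=true
            s.connect(new InetSocketAddress("127.0.0.1", 38919), 10_000); // connectTO=10000 ms
            s.setSoTimeout(20_000);      // readTO=20000 ms (blocking-read timeout)
            System.out.println("connected: " + s.getRemoteSocketAddress());
            s.close();
        }
    }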
2024-11-10T15:52:38,622 INFO [RS:0;c0771061be61:37143 {}] regionserver.HRegionServer(2659): reportForDuty to master=c0771061be61,38919,1731253957610 with port=37143, startcode=1731253957804 2024-11-10T15:52:38,622 DEBUG [RS:0;c0771061be61:37143 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T15:52:38,624 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49669, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T15:52:38,625 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38919 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c0771061be61,37143,1731253957804 2024-11-10T15:52:38,625 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38919 {}] master.ServerManager(517): Registering regionserver=c0771061be61,37143,1731253957804 2024-11-10T15:52:38,627 DEBUG [RS:1;c0771061be61:36515 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;c0771061be61:36515 2024-11-10T15:52:38,627 DEBUG [RS:2;c0771061be61:42457 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;c0771061be61:42457 2024-11-10T15:52:38,627 INFO [RS:1;c0771061be61:36515 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T15:52:38,627 INFO [RS:2;c0771061be61:42457 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T15:52:38,627 INFO [RS:1;c0771061be61:36515 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T15:52:38,627 DEBUG [RS:1;c0771061be61:36515 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-10T15:52:38,627 INFO [RS:2;c0771061be61:42457 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T15:52:38,627 DEBUG [RS:2;c0771061be61:42457 {}] regionserver.HRegionServer(832): About to register with Master. 
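reportForDuty above sends the server's host, port and startcode to the master, which first checks decommissioned status and then registers the server (the "Checking decommissioned status" / "Registering regionserver" pair). The toy sketch below mirrors that bookkeeping with an in-memory registry keyed by the "host,port,startcode" server name; the class and field names are hypothetical and this is not ServerManager.

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    public class ServerRegistrySketch {
        private final Set<String> decommissioned = new HashSet<>();
        private final Map<String, Long> onlineServers = new HashMap<>();

        // Mirrors "Checking decommissioned status" followed by "Registering regionserver".
        boolean register(String host, int port, long startcode) {
            String name = host + "," + port + "," + startcode;
            if (decommissioned.contains(name)) {
                return false;                               // registration refused
            }
            onlineServers.put(name, System.currentTimeMillis());
            return true;
        }

        public static void main(String[] args) {
            ServerRegistrySketch sm = new ServerRegistrySketch();
            System.out.println(sm.register("c0771061be61", 37143, 1731253957804L));
        }
    }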
2024-11-10T15:52:38,627 DEBUG [RS:0;c0771061be61:37143 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234 2024-11-10T15:52:38,627 DEBUG [RS:0;c0771061be61:37143 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40651 2024-11-10T15:52:38,627 DEBUG [RS:0;c0771061be61:37143 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T15:52:38,628 INFO [RS:2;c0771061be61:42457 {}] regionserver.HRegionServer(2659): reportForDuty to master=c0771061be61,38919,1731253957610 with port=42457, startcode=1731253957887 2024-11-10T15:52:38,628 INFO [RS:1;c0771061be61:36515 {}] regionserver.HRegionServer(2659): reportForDuty to master=c0771061be61,38919,1731253957610 with port=36515, startcode=1731253957845 2024-11-10T15:52:38,628 DEBUG [RS:2;c0771061be61:42457 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T15:52:38,628 DEBUG [RS:1;c0771061be61:36515 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T15:52:38,629 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39707, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T15:52:38,630 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35077, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T15:52:38,630 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38919 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c0771061be61,36515,1731253957845 2024-11-10T15:52:38,630 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38919 {}] master.ServerManager(517): Registering regionserver=c0771061be61,36515,1731253957845 2024-11-10T15:52:38,632 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38919 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c0771061be61,42457,1731253957887 2024-11-10T15:52:38,632 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38919 {}] master.ServerManager(517): Registering regionserver=c0771061be61,42457,1731253957887 2024-11-10T15:52:38,632 DEBUG [RS:1;c0771061be61:36515 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234 2024-11-10T15:52:38,632 DEBUG [RS:1;c0771061be61:36515 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40651 2024-11-10T15:52:38,633 DEBUG [RS:1;c0771061be61:36515 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T15:52:38,635 DEBUG [RS:2;c0771061be61:42457 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234 2024-11-10T15:52:38,635 DEBUG [RS:2;c0771061be61:42457 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40651 2024-11-10T15:52:38,635 DEBUG [RS:2;c0771061be61:42457 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T15:52:38,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T15:52:38,676 DEBUG [RS:0;c0771061be61:37143 {}] zookeeper.ZKUtil(111): regionserver:37143-0x1012572ef800001, quorum=127.0.0.1:60328, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c0771061be61,37143,1731253957804 2024-11-10T15:52:38,677 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c0771061be61,37143,1731253957804] 2024-11-10T15:52:38,677 WARN [RS:0;c0771061be61:37143 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-10T15:52:38,677 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c0771061be61,36515,1731253957845] 2024-11-10T15:52:38,677 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c0771061be61,42457,1731253957887] 2024-11-10T15:52:38,677 DEBUG [RS:1;c0771061be61:36515 {}] zookeeper.ZKUtil(111): regionserver:36515-0x1012572ef800002, quorum=127.0.0.1:60328, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c0771061be61,36515,1731253957845 2024-11-10T15:52:38,677 INFO [RS:0;c0771061be61:37143 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T15:52:38,677 WARN [RS:1;c0771061be61:36515 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-10T15:52:38,677 DEBUG [RS:2;c0771061be61:42457 {}] zookeeper.ZKUtil(111): regionserver:42457-0x1012572ef800003, quorum=127.0.0.1:60328, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c0771061be61,42457,1731253957887 2024-11-10T15:52:38,677 DEBUG [RS:0;c0771061be61:37143 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/WALs/c0771061be61,37143,1731253957804 2024-11-10T15:52:38,677 INFO [RS:1;c0771061be61:36515 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T15:52:38,677 WARN [RS:2;c0771061be61:42457 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
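The ZKUtil/RegionServerTracker entries above show each region server creating an ephemeral znode under /hbase/rs while the master watches that parent for NodeChildrenChanged events. The sketch below shows the same two moves with the stock Apache ZooKeeper client, reusing the quorum string from this log; parent-znode creation, ACL setup and error handling are omitted, so treat it as an illustration rather than RegionServerTracker itself.

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class RsZNodeSketch {
        public static void main(String[] args) throws Exception {
            Watcher watcher = (WatchedEvent e) ->
                System.out.println("ZK event: " + e.getType() + " on " + e.getPath());
            ZooKeeper zk = new ZooKeeper("127.0.0.1:60328", 30_000, watcher);
            // Region server side: the ephemeral node vanishes if the session dies.
            zk.create("/hbase/rs/c0771061be61,37143,1731253957804", new byte[0],
                      ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
            // Master side: list children of /hbase/rs and re-arm a watch on them.
            System.out.println("online servers: " + zk.getChildren("/hbase/rs", true));
            zk.close();
        }
    }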
2024-11-10T15:52:38,678 INFO [RS:2;c0771061be61:42457 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T15:52:38,678 DEBUG [RS:1;c0771061be61:36515 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/WALs/c0771061be61,36515,1731253957845 2024-11-10T15:52:38,678 DEBUG [RS:2;c0771061be61:42457 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/WALs/c0771061be61,42457,1731253957887 2024-11-10T15:52:38,684 INFO [RS:1;c0771061be61:36515 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T15:52:38,684 INFO [RS:0;c0771061be61:37143 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T15:52:38,688 INFO [RS:0;c0771061be61:37143 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T15:52:38,688 INFO [RS:2;c0771061be61:42457 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T15:52:38,688 INFO [RS:0;c0771061be61:37143 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T15:52:38,688 INFO [RS:0;c0771061be61:37143 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:38,689 INFO [RS:0;c0771061be61:37143 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T15:52:38,689 INFO [RS:1;c0771061be61:36515 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T15:52:38,690 INFO [RS:1;c0771061be61:36515 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T15:52:38,690 INFO [RS:1;c0771061be61:36515 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:38,690 INFO [RS:0;c0771061be61:37143 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T15:52:38,690 INFO [RS:0;c0771061be61:37143 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
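The MemStoreFlusher entries above report globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. the low mark is exactly 95% of the limit (880 x 0.95 = 836). The one-liner below just checks that arithmetic; the 0.95 factor is an assumption read off these two numbers, not a value quoted from configuration.

    public class MemstoreMarksSketch {
        public static void main(String[] args) {
            long limitMb = 880;              // globalMemStoreLimit from the log
            double lowerLimitFactor = 0.95;  // assumed; consistent with the logged 836 M
            System.out.println("low mark = " + Math.round(limitMb * lowerLimitFactor) + " M");
        }
    }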
2024-11-10T15:52:38,690 DEBUG [RS:0;c0771061be61:37143 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,690 DEBUG [RS:0;c0771061be61:37143 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,690 DEBUG [RS:0;c0771061be61:37143 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,690 DEBUG [RS:0;c0771061be61:37143 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,691 DEBUG [RS:0;c0771061be61:37143 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,691 DEBUG [RS:0;c0771061be61:37143 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c0771061be61:0, corePoolSize=2, maxPoolSize=2 2024-11-10T15:52:38,691 DEBUG [RS:0;c0771061be61:37143 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,691 DEBUG [RS:0;c0771061be61:37143 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,691 DEBUG [RS:0;c0771061be61:37143 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,691 DEBUG [RS:0;c0771061be61:37143 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,691 DEBUG [RS:0;c0771061be61:37143 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,691 DEBUG [RS:0;c0771061be61:37143 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,691 DEBUG [RS:0;c0771061be61:37143 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c0771061be61:0, corePoolSize=3, maxPoolSize=3 2024-11-10T15:52:38,691 DEBUG [RS:0;c0771061be61:37143 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0, corePoolSize=3, maxPoolSize=3 2024-11-10T15:52:38,692 INFO [RS:1;c0771061be61:36515 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T15:52:38,693 INFO [RS:1;c0771061be61:36515 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T15:52:38,693 INFO [RS:1;c0771061be61:36515 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
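The executor.ExecutorService entries above start one named pool per handler type with fixed core/max sizes: 1/1 for most region open/close handlers, 2/2 for RS_LOG_REPLAY_OPS, 3/3 for RS_SNAPSHOT_OPERATIONS and RS_FLUSH_OPERATIONS. The sketch below builds equivalent JDK pools for a few of those names; the pool names and sizes are taken from the log, while the queue choice and keep-alive are assumptions.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class RsExecutorPoolsSketch {
        static ThreadPoolExecutor pool(String name, int core, int max) {
            ThreadPoolExecutor tpe = new ThreadPoolExecutor(
                core, max, 60L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<>(),          // unbounded work queue (assumption)
                r -> new Thread(r, name));
            System.out.printf("Starting executor service name=%s, corePoolSize=%d, maxPoolSize=%d%n",
                              name, core, max);
            return tpe;
        }

        public static void main(String[] args) {
            pool("RS_OPEN_REGION", 1, 1).shutdown();
            pool("RS_LOG_REPLAY_OPS", 2, 2).shutdown();
            pool("RS_FLUSH_OPERATIONS", 3, 3).shutdown();
        }
    }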
2024-11-10T15:52:38,693 DEBUG [RS:1;c0771061be61:36515 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,693 DEBUG [RS:1;c0771061be61:36515 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,693 DEBUG [RS:1;c0771061be61:36515 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,693 DEBUG [RS:1;c0771061be61:36515 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,693 DEBUG [RS:1;c0771061be61:36515 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,693 DEBUG [RS:1;c0771061be61:36515 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c0771061be61:0, corePoolSize=2, maxPoolSize=2 2024-11-10T15:52:38,693 DEBUG [RS:1;c0771061be61:36515 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,693 DEBUG [RS:1;c0771061be61:36515 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,694 DEBUG [RS:1;c0771061be61:36515 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,694 DEBUG [RS:1;c0771061be61:36515 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,694 DEBUG [RS:1;c0771061be61:36515 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,694 DEBUG [RS:1;c0771061be61:36515 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,694 DEBUG [RS:1;c0771061be61:36515 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c0771061be61:0, corePoolSize=3, maxPoolSize=3 2024-11-10T15:52:38,694 DEBUG [RS:1;c0771061be61:36515 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0, corePoolSize=3, maxPoolSize=3 2024-11-10T15:52:38,695 INFO [RS:2;c0771061be61:42457 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T15:52:38,696 INFO [RS:2;c0771061be61:42457 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T15:52:38,696 INFO [RS:2;c0771061be61:42457 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-10T15:52:38,696 INFO [RS:0;c0771061be61:37143 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:38,696 INFO [RS:0;c0771061be61:37143 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:38,696 INFO [RS:0;c0771061be61:37143 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:38,697 INFO [RS:0;c0771061be61:37143 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:38,697 INFO [RS:0;c0771061be61:37143 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:38,697 INFO [RS:0;c0771061be61:37143 {}] hbase.ChoreService(168): Chore ScheduledChore name=c0771061be61,37143,1731253957804-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T15:52:38,699 INFO [RS:1;c0771061be61:36515 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:38,699 INFO [RS:2;c0771061be61:42457 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T15:52:38,699 INFO [RS:1;c0771061be61:36515 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:38,699 INFO [RS:1;c0771061be61:36515 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:38,699 INFO [RS:1;c0771061be61:36515 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:38,699 INFO [RS:1;c0771061be61:36515 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:38,699 INFO [RS:1;c0771061be61:36515 {}] hbase.ChoreService(168): Chore ScheduledChore name=c0771061be61,36515,1731253957845-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T15:52:38,700 INFO [RS:2;c0771061be61:42457 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T15:52:38,700 INFO [RS:2;c0771061be61:42457 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
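The ChoreService entries above register periodic tasks with very different cadences: CompactionChecker and MemstoreFlusherChore every 1000 ms, ExecutorStatusChore every 60000 ms, nonceCleaner every 360000 ms, BrokenStoreFileCleaner every 21600000 ms (6 h), and MobFileCleanerChore every 86400 s (daily). The sketch below reproduces a few of those cadences on a plain ScheduledExecutorService; it stands in for ChoreService and each tick is an empty placeholder.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreCadenceSketch {
        public static void main(String[] args) throws InterruptedException {
            ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);
            chores.scheduleAtFixedRate(() -> System.out.println("CompactionChecker tick"),
                                       0, 1000, TimeUnit.MILLISECONDS);   // period=1000
            chores.scheduleAtFixedRate(() -> System.out.println("ExecutorStatusChore tick"),
                                       0, 60000, TimeUnit.MILLISECONDS);  // period=60000
            chores.scheduleAtFixedRate(() -> System.out.println("MobFileCleanerChore tick"),
                                       0, 86400, TimeUnit.SECONDS);       // period=86400, unit=SECONDS
            Thread.sleep(3000);      // let a few 1 s ticks fire, then stop
            chores.shutdownNow();
        }
    }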
2024-11-10T15:52:38,701 DEBUG [RS:2;c0771061be61:42457 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,701 DEBUG [RS:2;c0771061be61:42457 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,701 DEBUG [RS:2;c0771061be61:42457 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,701 DEBUG [RS:2;c0771061be61:42457 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,701 DEBUG [RS:2;c0771061be61:42457 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,701 DEBUG [RS:2;c0771061be61:42457 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c0771061be61:0, corePoolSize=2, maxPoolSize=2 2024-11-10T15:52:38,701 DEBUG [RS:2;c0771061be61:42457 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,701 DEBUG [RS:2;c0771061be61:42457 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,701 DEBUG [RS:2;c0771061be61:42457 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,701 DEBUG [RS:2;c0771061be61:42457 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,702 DEBUG [RS:2;c0771061be61:42457 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,702 DEBUG [RS:2;c0771061be61:42457 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c0771061be61:0, corePoolSize=1, maxPoolSize=1 2024-11-10T15:52:38,702 DEBUG [RS:2;c0771061be61:42457 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c0771061be61:0, corePoolSize=3, maxPoolSize=3 2024-11-10T15:52:38,702 DEBUG [RS:2;c0771061be61:42457 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0, corePoolSize=3, maxPoolSize=3 2024-11-10T15:52:38,703 INFO [RS:2;c0771061be61:42457 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:38,703 INFO [RS:2;c0771061be61:42457 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:38,703 INFO [RS:2;c0771061be61:42457 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:38,703 INFO [RS:2;c0771061be61:42457 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-10T15:52:38,703 INFO [RS:2;c0771061be61:42457 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:38,703 INFO [RS:2;c0771061be61:42457 {}] hbase.ChoreService(168): Chore ScheduledChore name=c0771061be61,42457,1731253957887-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T15:52:38,713 INFO [RS:0;c0771061be61:37143 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T15:52:38,713 INFO [RS:1;c0771061be61:36515 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T15:52:38,713 INFO [RS:0;c0771061be61:37143 {}] hbase.ChoreService(168): Chore ScheduledChore name=c0771061be61,37143,1731253957804-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:38,713 INFO [RS:1;c0771061be61:36515 {}] hbase.ChoreService(168): Chore ScheduledChore name=c0771061be61,36515,1731253957845-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:38,713 INFO [RS:1;c0771061be61:36515 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:38,713 INFO [RS:0;c0771061be61:37143 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:38,713 INFO [RS:1;c0771061be61:36515 {}] regionserver.Replication(171): c0771061be61,36515,1731253957845 started 2024-11-10T15:52:38,713 INFO [RS:0;c0771061be61:37143 {}] regionserver.Replication(171): c0771061be61,37143,1731253957804 started 2024-11-10T15:52:38,718 INFO [RS:2;c0771061be61:42457 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T15:52:38,718 INFO [RS:2;c0771061be61:42457 {}] hbase.ChoreService(168): Chore ScheduledChore name=c0771061be61,42457,1731253957887-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:38,718 INFO [RS:2;c0771061be61:42457 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:38,718 INFO [RS:2;c0771061be61:42457 {}] regionserver.Replication(171): c0771061be61,42457,1731253957887 started 2024-11-10T15:52:38,726 INFO [RS:1;c0771061be61:36515 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:38,726 INFO [RS:0;c0771061be61:37143 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-10T15:52:38,727 INFO [RS:1;c0771061be61:36515 {}] regionserver.HRegionServer(1482): Serving as c0771061be61,36515,1731253957845, RpcServer on c0771061be61/172.17.0.3:36515, sessionid=0x1012572ef800002 2024-11-10T15:52:38,727 INFO [RS:0;c0771061be61:37143 {}] regionserver.HRegionServer(1482): Serving as c0771061be61,37143,1731253957804, RpcServer on c0771061be61/172.17.0.3:37143, sessionid=0x1012572ef800001 2024-11-10T15:52:38,727 DEBUG [RS:1;c0771061be61:36515 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T15:52:38,727 DEBUG [RS:0;c0771061be61:37143 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T15:52:38,727 DEBUG [RS:1;c0771061be61:36515 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c0771061be61,36515,1731253957845 2024-11-10T15:52:38,727 DEBUG [RS:0;c0771061be61:37143 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c0771061be61,37143,1731253957804 2024-11-10T15:52:38,727 DEBUG [RS:1;c0771061be61:36515 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c0771061be61,36515,1731253957845' 2024-11-10T15:52:38,727 DEBUG [RS:0;c0771061be61:37143 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c0771061be61,37143,1731253957804' 2024-11-10T15:52:38,727 DEBUG [RS:1;c0771061be61:36515 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T15:52:38,727 DEBUG [RS:0;c0771061be61:37143 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T15:52:38,728 DEBUG [RS:1;c0771061be61:36515 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T15:52:38,728 DEBUG [RS:0;c0771061be61:37143 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T15:52:38,728 DEBUG [RS:1;c0771061be61:36515 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T15:52:38,728 DEBUG [RS:1;c0771061be61:36515 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T15:52:38,728 DEBUG [RS:1;c0771061be61:36515 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c0771061be61,36515,1731253957845 2024-11-10T15:52:38,728 DEBUG [RS:1;c0771061be61:36515 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c0771061be61,36515,1731253957845' 2024-11-10T15:52:38,728 DEBUG [RS:1;c0771061be61:36515 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T15:52:38,728 DEBUG [RS:0;c0771061be61:37143 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T15:52:38,728 DEBUG [RS:0;c0771061be61:37143 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T15:52:38,728 DEBUG [RS:0;c0771061be61:37143 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c0771061be61,37143,1731253957804 2024-11-10T15:52:38,728 DEBUG [RS:0;c0771061be61:37143 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c0771061be61,37143,1731253957804' 2024-11-10T15:52:38,728 DEBUG [RS:0;c0771061be61:37143 {}] procedure.ZKProcedureMemberRpcs(134): Checking for 
aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T15:52:38,729 DEBUG [RS:1;c0771061be61:36515 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T15:52:38,729 DEBUG [RS:0;c0771061be61:37143 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T15:52:38,729 DEBUG [RS:1;c0771061be61:36515 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T15:52:38,729 INFO [RS:1;c0771061be61:36515 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T15:52:38,729 INFO [RS:1;c0771061be61:36515 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-10T15:52:38,729 DEBUG [RS:0;c0771061be61:37143 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T15:52:38,729 INFO [RS:0;c0771061be61:37143 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T15:52:38,729 INFO [RS:0;c0771061be61:37143 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-10T15:52:38,732 INFO [RS:2;c0771061be61:42457 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:38,732 INFO [RS:2;c0771061be61:42457 {}] regionserver.HRegionServer(1482): Serving as c0771061be61,42457,1731253957887, RpcServer on c0771061be61/172.17.0.3:42457, sessionid=0x1012572ef800003 2024-11-10T15:52:38,732 DEBUG [RS:2;c0771061be61:42457 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T15:52:38,732 DEBUG [RS:2;c0771061be61:42457 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c0771061be61,42457,1731253957887 2024-11-10T15:52:38,733 DEBUG [RS:2;c0771061be61:42457 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c0771061be61,42457,1731253957887' 2024-11-10T15:52:38,733 DEBUG [RS:2;c0771061be61:42457 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T15:52:38,733 DEBUG [RS:2;c0771061be61:42457 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T15:52:38,734 DEBUG [RS:2;c0771061be61:42457 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T15:52:38,734 DEBUG [RS:2;c0771061be61:42457 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T15:52:38,734 DEBUG [RS:2;c0771061be61:42457 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c0771061be61,42457,1731253957887 2024-11-10T15:52:38,734 DEBUG [RS:2;c0771061be61:42457 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c0771061be61,42457,1731253957887' 2024-11-10T15:52:38,734 DEBUG [RS:2;c0771061be61:42457 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T15:52:38,734 DEBUG [RS:2;c0771061be61:42457 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T15:52:38,735 DEBUG [RS:2;c0771061be61:42457 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 
2024-11-10T15:52:38,735 INFO [RS:2;c0771061be61:42457 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T15:52:38,735 INFO [RS:2;c0771061be61:42457 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-10T15:52:38,832 INFO [RS:0;c0771061be61:37143 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0771061be61%2C37143%2C1731253957804, suffix=, logDir=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/WALs/c0771061be61,37143,1731253957804, archiveDir=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/oldWALs, maxLogs=32 2024-11-10T15:52:38,832 INFO [RS:1;c0771061be61:36515 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0771061be61%2C36515%2C1731253957845, suffix=, logDir=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/WALs/c0771061be61,36515,1731253957845, archiveDir=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/oldWALs, maxLogs=32 2024-11-10T15:52:38,835 INFO [RS:1;c0771061be61:36515 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0771061be61%2C36515%2C1731253957845.1731253958835 2024-11-10T15:52:38,835 INFO [RS:0;c0771061be61:37143 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0771061be61%2C37143%2C1731253957804.1731253958835 2024-11-10T15:52:38,838 INFO [RS:2;c0771061be61:42457 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0771061be61%2C42457%2C1731253957887, suffix=, logDir=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/WALs/c0771061be61,42457,1731253957887, archiveDir=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/oldWALs, maxLogs=32 2024-11-10T15:52:38,840 INFO [RS:2;c0771061be61:42457 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0771061be61%2C42457%2C1731253957887.1731253958839 2024-11-10T15:52:38,847 INFO [RS:1;c0771061be61:36515 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/WALs/c0771061be61,36515,1731253957845/c0771061be61%2C36515%2C1731253957845.1731253958835 2024-11-10T15:52:38,849 INFO [RS:0;c0771061be61:37143 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/WALs/c0771061be61,37143,1731253957804/c0771061be61%2C37143%2C1731253957804.1731253958835 2024-11-10T15:52:38,850 DEBUG [RS:1;c0771061be61:36515 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46737:46737),(127.0.0.1/127.0.0.1:33607:33607),(127.0.0.1/127.0.0.1:33487:33487)] 2024-11-10T15:52:38,851 DEBUG [RS:0;c0771061be61:37143 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33487:33487),(127.0.0.1/127.0.0.1:33607:33607),(127.0.0.1/127.0.0.1:46737:46737)] 2024-11-10T15:52:38,853 INFO [RS:2;c0771061be61:42457 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/WALs/c0771061be61,42457,1731253957887/c0771061be61%2C42457%2C1731253957887.1731253958839 2024-11-10T15:52:38,855 DEBUG [RS:2;c0771061be61:42457 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33607:33607),(127.0.0.1/127.0.0.1:33487:33487),(127.0.0.1/127.0.0.1:46737:46737)] 2024-11-10T15:52:38,855 DEBUG [c0771061be61:38919 {}] 
assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=2, allServersCount=2 2024-11-10T15:52:38,856 DEBUG [c0771061be61:38919 {}] balancer.BalancerClusterState(204): Hosts are {c0771061be61=0} racks are {/default-rack=0} 2024-11-10T15:52:38,858 DEBUG [c0771061be61:38919 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-10T15:52:38,858 DEBUG [c0771061be61:38919 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-10T15:52:38,858 DEBUG [c0771061be61:38919 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-10T15:52:38,858 DEBUG [c0771061be61:38919 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-10T15:52:38,858 INFO [c0771061be61:38919 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-10T15:52:38,858 INFO [c0771061be61:38919 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-10T15:52:38,858 DEBUG [c0771061be61:38919 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-10T15:52:38,859 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c0771061be61,37143,1731253957804 2024-11-10T15:52:38,860 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c0771061be61,37143,1731253957804, state=OPENING 2024-11-10T15:52:38,879 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-10T15:52:38,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:38,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42457-0x1012572ef800003, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:38,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37143-0x1012572ef800001, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:38,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36515-0x1012572ef800002, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:38,891 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T15:52:38,891 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:52:38,891 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:52:38,891 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c0771061be61,37143,1731253957804}] 2024-11-10T15:52:38,891 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode 
for path /hbase/meta-region-server: CHANGED 2024-11-10T15:52:38,891 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:52:39,047 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-10T15:52:39,049 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44481, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-10T15:52:39,056 INFO [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-10T15:52:39,057 INFO [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T15:52:39,061 INFO [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0771061be61%2C37143%2C1731253957804.meta, suffix=.meta, logDir=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/WALs/c0771061be61,37143,1731253957804, archiveDir=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/oldWALs, maxLogs=32 2024-11-10T15:52:39,062 INFO [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c0771061be61%2C37143%2C1731253957804.meta.1731253959062.meta 2024-11-10T15:52:39,069 INFO [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/WALs/c0771061be61,37143,1731253957804/c0771061be61%2C37143%2C1731253957804.meta.1731253959062.meta 2024-11-10T15:52:39,073 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33487:33487),(127.0.0.1/127.0.0.1:46737:46737),(127.0.0.1/127.0.0.1:33607:33607)] 2024-11-10T15:52:39,074 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-10T15:52:39,075 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-10T15:52:39,075 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-10T15:52:39,075 INFO [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
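Two details in the meta-WAL entries above are worth pulling out: the roll size is half the block size (128 MB vs. 256 MB), and the WAL file name is the prefix (server name with commas percent-encoded, plus ".meta") followed by the creation timestamp and the ".meta" suffix. The sketch below rebuilds the logged file name from those pieces; the 0.5 roll multiplier is an assumption read off the two logged sizes, not a quoted configuration default.

    public class WalNameSketch {
        public static void main(String[] args) {
            long blocksize = 256L * 1024 * 1024;
            long rollsize  = (long) (blocksize * 0.5);   // 128 MB, matching the log (assumed multiplier)
            // Server name with ',' percent-encoded, as it appears in WAL file names.
            String prefix = "c0771061be61,37143,1731253957804".replace(",", "%2C") + ".meta";
            long creationTs = 1731253959062L;            // timestamp from the logged WAL name
            String walName = prefix + "." + creationTs + ".meta";
            System.out.println(walName + " (roll at " + rollsize + " bytes)");
        }
    }

Running this prints c0771061be61%2C37143%2C1731253957804.meta.1731253959062.meta, the same name the StreamSlowMonitor and AbstractFSWAL entries report.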
2024-11-10T15:52:39,075 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-10T15:52:39,075 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T15:52:39,075 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-10T15:52:39,075 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-10T15:52:39,077 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T15:52:39,078 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T15:52:39,078 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:52:39,079 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:52:39,079 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T15:52:39,080 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T15:52:39,080 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:52:39,081 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:52:39,081 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T15:52:39,082 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T15:52:39,082 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:52:39,082 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T15:52:39,082 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T15:52:39,083 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T15:52:39,083 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:52:39,084 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
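The CompactionConfiguration entries above give minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2 and off-peak ratio 5.0. A common reading of the ratio, and roughly what ratio-based selection enforces, is that a file stays in a candidate set only if its size is at most ratio times the combined size of the other candidates. The sketch below implements only that simplified check under that assumption; it is not the ExploringCompactionPolicy named in the log.

    import java.util.List;

    public class RatioCheckSketch {
        // True if every file satisfies size <= ratio * sum(sizes of the other files).
        static boolean withinRatio(List<Long> sizes, double ratio) {
            long total = sizes.stream().mapToLong(Long::longValue).sum();
            return sizes.stream().allMatch(s -> s <= ratio * (total - s));
        }

        public static void main(String[] args) {
            double ratio = 1.2;                                             // peak-hours ratio from the log
            System.out.println(withinRatio(List.of(10L, 12L, 11L), ratio)); // similar sizes -> true
            System.out.println(withinRatio(List.of(100L, 5L, 6L), ratio));  // one oversized file -> false
        }
    }

The larger off-peak ratio (5.0) simply loosens the same check, letting more lopsided sets compact when the cluster is idle.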
2024-11-10T15:52:39,084 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T15:52:39,085 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/hbase/meta/1588230740 2024-11-10T15:52:39,086 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/hbase/meta/1588230740 2024-11-10T15:52:39,087 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T15:52:39,088 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T15:52:39,088 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-10T15:52:39,090 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T15:52:39,090 INFO [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69635763, jitterRate=0.03765372931957245}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T15:52:39,091 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-10T15:52:39,092 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731253959075Writing region info on filesystem at 1731253959075Initializing all the Stores at 1731253959077 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731253959077Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731253959077Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731253959077Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731253959077Cleaning up temporary data from old regions at 1731253959088 (+11 ms)Running coprocessor post-open hooks at 1731253959091 (+3 ms)Region opened successfully at 1731253959091 2024-11-10T15:52:39,093 INFO [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731253959046 2024-11-10T15:52:39,097 DEBUG [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-10T15:52:39,097 INFO [RS_OPEN_META-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-10T15:52:39,098 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c0771061be61,37143,1731253957804 2024-11-10T15:52:39,100 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c0771061be61,37143,1731253957804, state=OPEN 2024-11-10T15:52:39,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42457-0x1012572ef800003, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T15:52:39,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36515-0x1012572ef800002, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T15:52:39,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T15:52:39,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37143-0x1012572ef800001, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T15:52:39,111 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c0771061be61,37143,1731253957804 2024-11-10T15:52:39,111 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:52:39,111 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:52:39,111 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:52:39,111 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T15:52:39,116 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-10T15:52:39,116 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c0771061be61,37143,1731253957804 in 220 msec 2024-11-10T15:52:39,120 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-10T15:52:39,120 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 665 msec 2024-11-10T15:52:39,121 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T15:52:39,121 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-10T15:52:39,123 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T15:52:39,123 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c0771061be61,37143,1731253957804, seqNum=-1] 2024-11-10T15:52:39,123 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T15:52:39,125 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35613, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T15:52:39,132 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 741 msec 2024-11-10T15:52:39,132 INFO [master/c0771061be61:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731253959132, completionTime=-1 2024-11-10T15:52:39,132 INFO [master/c0771061be61:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-10T15:52:39,132 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 
2024-11-10T15:52:39,134 INFO [master/c0771061be61:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-10T15:52:39,134 INFO [master/c0771061be61:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731254019134 2024-11-10T15:52:39,134 INFO [master/c0771061be61:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731254079134 2024-11-10T15:52:39,134 INFO [master/c0771061be61:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-10T15:52:39,135 INFO [master/c0771061be61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0771061be61,38919,1731253957610-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:39,135 INFO [master/c0771061be61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0771061be61,38919,1731253957610-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:39,135 INFO [master/c0771061be61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0771061be61,38919,1731253957610-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:39,135 INFO [master/c0771061be61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c0771061be61:38919, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:39,135 INFO [master/c0771061be61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:39,135 INFO [master/c0771061be61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:39,137 DEBUG [master/c0771061be61:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-10T15:52:39,140 INFO [master/c0771061be61:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.071sec 2024-11-10T15:52:39,141 INFO [master/c0771061be61:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-10T15:52:39,141 INFO [master/c0771061be61:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-10T15:52:39,141 INFO [master/c0771061be61:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-10T15:52:39,141 INFO [master/c0771061be61:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-10T15:52:39,141 INFO [master/c0771061be61:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-10T15:52:39,141 INFO [master/c0771061be61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0771061be61,38919,1731253957610-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
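The ChoreService registrations above (ClusterStatusChore, BalancerChore, CatalogJanitor, HbckChore, and the rest, continuing below) all follow the same ScheduledChore pattern. A minimal sketch of that pattern follows; ScheduledChore and ChoreService are HBase-internal classes, so the constructor shapes and the NoopStopper helper here are assumptions made for illustration.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    // Hedged sketch of the chore pattern behind the "Chore ScheduledChore name=... is enabled" lines.
    public class ChoreSketch {
      // Hypothetical stopper used only so the sketch compiles on its own.
      static class NoopStopper implements Stoppable {
        private volatile boolean stopped;
        @Override public void stop(String why) { stopped = true; }
        @Override public boolean isStopped() { return stopped; }
      }

      public static void main(String[] args) {
        ChoreService service = new ChoreService("example");
        ScheduledChore chore = new ScheduledChore("ExampleChore", new NoopStopper(), 60000) {
          @Override protected void chore() {
            // periodic work goes here, e.g. a status or cleanup pass
          }
        };
        service.scheduleChore(chore); // runs every 60s (MILLISECONDS), like ClusterStatusChore above
        chore.cancel();
        service.shutdown();
      }
    }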
2024-11-10T15:52:39,141 INFO [master/c0771061be61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0771061be61,38919,1731253957610-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-10T15:52:39,144 DEBUG [master/c0771061be61:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-10T15:52:39,144 INFO [master/c0771061be61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-10T15:52:39,144 INFO [master/c0771061be61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0771061be61,38919,1731253957610-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T15:52:39,175 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74e402b7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T15:52:39,175 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c0771061be61,38919,-1 for getting cluster id 2024-11-10T15:52:39,175 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-10T15:52:39,177 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '68d7b67c-c582-45c4-94cb-cc5ae8d2c21e' 2024-11-10T15:52:39,178 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-10T15:52:39,178 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "68d7b67c-c582-45c4-94cb-cc5ae8d2c21e" 2024-11-10T15:52:39,179 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2944a227, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T15:52:39,179 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c0771061be61,38919,-1] 2024-11-10T15:52:39,179 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-10T15:52:39,180 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:52:39,182 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50080, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-10T15:52:39,184 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77c67938, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T15:52:39,185 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T15:52:39,187 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is 
[region=hbase:meta,,1.1588230740, hostname=c0771061be61,37143,1731253957804, seqNum=-1] 2024-11-10T15:52:39,188 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T15:52:39,190 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43836, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T15:52:39,194 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c0771061be61,38919,1731253957610 2024-11-10T15:52:39,195 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-10T15:52:39,197 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is c0771061be61,38919,1731253957610 2024-11-10T15:52:39,197 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1146814c 2024-11-10T15:52:39,197 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-10T15:52:39,199 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50096, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-10T15:52:39,199 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38919 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T15:52:39,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38919 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-10T15:52:39,203 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-10T15:52:39,203 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:52:39,203 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38919 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-10T15:52:39,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38919 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T15:52:39,205 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-10T15:52:39,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741837_1013 (size=392) 
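The "Client=jenkins//172.17.0.3 create 'TestHBaseWalOnEC'" request above, which stores pid=4 (CreateTableProcedure), corresponds to an Admin.createTable call on the client side. A hedged sketch of that call is below; the descriptor mirrors what the log prints (REGION_REPLICATION=1, single family 'cf' keeping one version), while how the test actually obtains its Connection is an assumption.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hedged sketch of the client call behind the create-table request logged above.
    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
              .setRegionReplication(1)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)
                  .build())
              .build();
          admin.createTable(td); // drives the CreateTableProcedure (pid=4) seen in this log
        }
      }
    }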
2024-11-10T15:52:39,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42771 is added to blk_1073741837_1013 (size=392) 2024-11-10T15:52:39,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42219 is added to blk_1073741837_1013 (size=392) 2024-11-10T15:52:39,217 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => af16ac139b7974885b61ac966114c059, NAME => 'TestHBaseWalOnEC,,1731253959199.af16ac139b7974885b61ac966114c059.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234 2024-11-10T15:52:39,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42771 is added to blk_1073741838_1014 (size=51) 2024-11-10T15:52:39,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741838_1014 (size=51) 2024-11-10T15:52:39,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42219 is added to blk_1073741838_1014 (size=51) 2024-11-10T15:52:39,227 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731253959199.af16ac139b7974885b61ac966114c059.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T15:52:39,227 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing af16ac139b7974885b61ac966114c059, disabling compactions & flushes 2024-11-10T15:52:39,227 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731253959199.af16ac139b7974885b61ac966114c059. 2024-11-10T15:52:39,227 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731253959199.af16ac139b7974885b61ac966114c059. 2024-11-10T15:52:39,227 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731253959199.af16ac139b7974885b61ac966114c059. after waiting 0 ms 2024-11-10T15:52:39,227 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731253959199.af16ac139b7974885b61ac966114c059. 2024-11-10T15:52:39,227 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731253959199.af16ac139b7974885b61ac966114c059. 
2024-11-10T15:52:39,227 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for af16ac139b7974885b61ac966114c059: Waiting for close lock at 1731253959227Disabling compacts and flushes for region at 1731253959227Disabling writes for close at 1731253959227Writing region close event to WAL at 1731253959227Closed at 1731253959227 2024-11-10T15:52:39,229 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-10T15:52:39,229 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731253959199.af16ac139b7974885b61ac966114c059.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731253959229"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731253959229"}]},"ts":"1731253959229"} 2024-11-10T15:52:39,233 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-10T15:52:39,234 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-10T15:52:39,234 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731253959234"}]},"ts":"1731253959234"} 2024-11-10T15:52:39,237 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-10T15:52:39,237 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {c0771061be61=0} racks are {/default-rack=0} 2024-11-10T15:52:39,238 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-10T15:52:39,238 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-10T15:52:39,238 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-10T15:52:39,239 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-10T15:52:39,239 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-10T15:52:39,239 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-10T15:52:39,239 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-10T15:52:39,239 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-10T15:52:39,239 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-10T15:52:39,239 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-10T15:52:39,239 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=af16ac139b7974885b61ac966114c059, ASSIGN}] 2024-11-10T15:52:39,241 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=af16ac139b7974885b61ac966114c059, ASSIGN 2024-11-10T15:52:39,243 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=af16ac139b7974885b61ac966114c059, ASSIGN; state=OFFLINE, location=c0771061be61,36515,1731253957845; forceNewPlan=false, retain=false 2024-11-10T15:52:39,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38919 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T15:52:39,393 INFO [c0771061be61:38919 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-10T15:52:39,394 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=af16ac139b7974885b61ac966114c059, regionState=OPENING, regionLocation=c0771061be61,36515,1731253957845 2024-11-10T15:52:39,401 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=af16ac139b7974885b61ac966114c059, ASSIGN because future has completed 2024-11-10T15:52:39,402 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure af16ac139b7974885b61ac966114c059, server=c0771061be61,36515,1731253957845}] 2024-11-10T15:52:39,509 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-10T15:52:39,510 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-10T15:52:39,516 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T15:52:39,516 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-10T15:52:39,517 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-10T15:52:39,517 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-10T15:52:39,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38919 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T15:52:39,557 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-10T15:52:39,559 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46605, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-10T15:52:39,565 INFO [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731253959199.af16ac139b7974885b61ac966114c059. 
2024-11-10T15:52:39,565 DEBUG [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => af16ac139b7974885b61ac966114c059, NAME => 'TestHBaseWalOnEC,,1731253959199.af16ac139b7974885b61ac966114c059.', STARTKEY => '', ENDKEY => ''} 2024-11-10T15:52:39,565 DEBUG [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC af16ac139b7974885b61ac966114c059 2024-11-10T15:52:39,566 DEBUG [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731253959199.af16ac139b7974885b61ac966114c059.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T15:52:39,566 DEBUG [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for af16ac139b7974885b61ac966114c059 2024-11-10T15:52:39,566 DEBUG [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for af16ac139b7974885b61ac966114c059 2024-11-10T15:52:39,568 INFO [StoreOpener-af16ac139b7974885b61ac966114c059-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region af16ac139b7974885b61ac966114c059 2024-11-10T15:52:39,570 INFO [StoreOpener-af16ac139b7974885b61ac966114c059-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region af16ac139b7974885b61ac966114c059 columnFamilyName cf 2024-11-10T15:52:39,570 DEBUG [StoreOpener-af16ac139b7974885b61ac966114c059-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T15:52:39,570 INFO [StoreOpener-af16ac139b7974885b61ac966114c059-1 {}] regionserver.HStore(327): Store=af16ac139b7974885b61ac966114c059/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T15:52:39,570 DEBUG [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for af16ac139b7974885b61ac966114c059 2024-11-10T15:52:39,571 DEBUG [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/default/TestHBaseWalOnEC/af16ac139b7974885b61ac966114c059 2024-11-10T15:52:39,572 DEBUG 
[RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/default/TestHBaseWalOnEC/af16ac139b7974885b61ac966114c059 2024-11-10T15:52:39,572 DEBUG [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for af16ac139b7974885b61ac966114c059 2024-11-10T15:52:39,572 DEBUG [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for af16ac139b7974885b61ac966114c059 2024-11-10T15:52:39,574 DEBUG [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for af16ac139b7974885b61ac966114c059 2024-11-10T15:52:39,576 DEBUG [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/default/TestHBaseWalOnEC/af16ac139b7974885b61ac966114c059/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T15:52:39,577 INFO [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened af16ac139b7974885b61ac966114c059; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74478171, jitterRate=0.10981123149394989}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-10T15:52:39,577 DEBUG [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for af16ac139b7974885b61ac966114c059 2024-11-10T15:52:39,578 DEBUG [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for af16ac139b7974885b61ac966114c059: Running coprocessor pre-open hook at 1731253959566Writing region info on filesystem at 1731253959566Initializing all the Stores at 1731253959568 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731253959568Cleaning up temporary data from old regions at 1731253959572 (+4 ms)Running coprocessor post-open hooks at 1731253959577 (+5 ms)Region opened successfully at 1731253959578 (+1 ms) 2024-11-10T15:52:39,579 INFO [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731253959199.af16ac139b7974885b61ac966114c059., pid=6, masterSystemTime=1731253959557 2024-11-10T15:52:39,582 DEBUG [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731253959199.af16ac139b7974885b61ac966114c059. 2024-11-10T15:52:39,583 INFO [RS_OPEN_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731253959199.af16ac139b7974885b61ac966114c059. 
2024-11-10T15:52:39,584 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=af16ac139b7974885b61ac966114c059, regionState=OPEN, openSeqNum=2, regionLocation=c0771061be61,36515,1731253957845 2024-11-10T15:52:39,587 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure af16ac139b7974885b61ac966114c059, server=c0771061be61,36515,1731253957845 because future has completed 2024-11-10T15:52:39,594 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-10T15:52:39,595 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure af16ac139b7974885b61ac966114c059, server=c0771061be61,36515,1731253957845 in 187 msec 2024-11-10T15:52:39,598 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-10T15:52:39,598 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=af16ac139b7974885b61ac966114c059, ASSIGN in 355 msec 2024-11-10T15:52:39,599 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-10T15:52:39,600 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731253959599"}]},"ts":"1731253959599"} 2024-11-10T15:52:39,603 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-10T15:52:39,604 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-10T15:52:39,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 405 msec 2024-11-10T15:52:39,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38919 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T15:52:39,829 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-10T15:52:39,830 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-10T15:52:39,830 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-10T15:52:39,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-10T15:52:39,835 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-10T15:52:39,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
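The HBaseTestingUtil lines above ("Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms") are the test blocking until the new table is fully online. A minimal test-side sketch follows; the utility instance stands in for whichever HBaseTestingUtil started this mini cluster, and the exact method signature is an assumption inferred from the log's HBaseTestingUtil(3046) call site.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;

    // Hedged sketch: block until every region of the new table is assigned, as in the
    // "All regions for table TestHBaseWalOnEC assigned" lines above.
    public class WaitForAssignmentSketch {
      static void waitForTable(HBaseTestingUtil util) throws Exception {
        util.waitUntilAllRegionsAssigned(TableName.valueOf("TestHBaseWalOnEC"), 60000);
      }
    }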
2024-11-10T15:52:39,843 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731253959199.af16ac139b7974885b61ac966114c059., hostname=c0771061be61,36515,1731253957845, seqNum=2] 2024-11-10T15:52:39,843 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T15:52:39,845 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48910, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T15:52:39,848 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38919 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestHBaseWalOnEC 2024-11-10T15:52:39,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38919 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-10T15:52:39,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38919 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-10T15:52:39,851 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-10T15:52:39,852 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-10T15:52:39,852 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-10T15:52:39,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38919 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-10T15:52:40,007 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36515 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-10T15:52:40,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731253959199.af16ac139b7974885b61ac966114c059. 
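The region-location fetch for row='row' above, followed by the flush of a 32 B cell below whose key is printed as row/cf:cq, implies a single Put issued by the test before it asks for the flush. A hedged sketch of that write is below; the value bytes are not recoverable from the log, so the payload used here is an assumption.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hedged sketch of the single-cell write that the flush below persists (row/cf:cq).
    public class PutSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestHBaseWalOnEC"))) {
          Put put = new Put(Bytes.toBytes("row"));
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")); // value is assumed
          table.put(put); // goes through the WAL under test before landing in the memstore
        }
      }
    }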
2024-11-10T15:52:40,008 INFO [RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing af16ac139b7974885b61ac966114c059 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-10T15:52:40,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/default/TestHBaseWalOnEC/af16ac139b7974885b61ac966114c059/.tmp/cf/bed741283a244090a88ec2f8ac2871df is 36, key is row/cf:cq/1731253959846/Put/seqid=0 2024-11-10T15:52:40,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42219 is added to blk_1073741839_1015 (size=4787) 2024-11-10T15:52:40,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741839_1015 (size=4787) 2024-11-10T15:52:40,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42771 is added to blk_1073741839_1015 (size=4787) 2024-11-10T15:52:40,039 INFO [RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/default/TestHBaseWalOnEC/af16ac139b7974885b61ac966114c059/.tmp/cf/bed741283a244090a88ec2f8ac2871df 2024-11-10T15:52:40,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/default/TestHBaseWalOnEC/af16ac139b7974885b61ac966114c059/.tmp/cf/bed741283a244090a88ec2f8ac2871df as hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/default/TestHBaseWalOnEC/af16ac139b7974885b61ac966114c059/cf/bed741283a244090a88ec2f8ac2871df 2024-11-10T15:52:40,058 INFO [RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/default/TestHBaseWalOnEC/af16ac139b7974885b61ac966114c059/cf/bed741283a244090a88ec2f8ac2871df, entries=1, sequenceid=5, filesize=4.7 K 2024-11-10T15:52:40,059 INFO [RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for af16ac139b7974885b61ac966114c059 in 51ms, sequenceid=5, compaction requested=false 2024-11-10T15:52:40,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for af16ac139b7974885b61ac966114c059: 2024-11-10T15:52:40,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731253959199.af16ac139b7974885b61ac966114c059. 
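The FlushTableProcedure (pid=7) and FlushRegionProcedure (pid=8) above, which end with a 4.7 K HFile committed into cf/, are driven by a client-side flush request ("Client=jenkins//172.17.0.3 flush TestHBaseWalOnEC"). A hedged sketch of that call is below; as before, how the test obtains its Connection is an assumption.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Hedged sketch of the admin call behind the FlushTableProcedure seen in this log.
    public class FlushSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.flush(TableName.valueOf("TestHBaseWalOnEC")); // forces the memstore to an HFile
        }
      }
    }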
2024-11-10T15:52:40,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0771061be61:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-10T15:52:40,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38919 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-10T15:52:40,066 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-10T15:52:40,067 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 210 msec 2024-11-10T15:52:40,070 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 220 msec 2024-11-10T15:52:40,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38919 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-10T15:52:40,170 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-10T15:52:40,176 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-10T15:52:40,176 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-10T15:52:40,177 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T15:52:40,177 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:52:40,177 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:52:40,177 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-10T15:52:40,177 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-10T15:52:40,177 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=300309102, stopped=false 2024-11-10T15:52:40,177 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c0771061be61,38919,1731253957610 2024-11-10T15:52:40,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37143-0x1012572ef800001, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T15:52:40,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42457-0x1012572ef800003, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T15:52:40,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36515-0x1012572ef800002, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T15:52:40,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T15:52:40,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37143-0x1012572ef800001, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:40,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42457-0x1012572ef800003, 
quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:40,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36515-0x1012572ef800002, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:40,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:40,233 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T15:52:40,233 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-10T15:52:40,234 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T15:52:40,234 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:52:40,234 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37143-0x1012572ef800001, quorum=127.0.0.1:60328, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:52:40,234 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36515-0x1012572ef800002, quorum=127.0.0.1:60328, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:52:40,234 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c0771061be61,37143,1731253957804' ***** 2024-11-10T15:52:40,235 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T15:52:40,235 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c0771061be61,36515,1731253957845' ***** 2024-11-10T15:52:40,235 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42457-0x1012572ef800003, quorum=127.0.0.1:60328, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:52:40,235 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T15:52:40,235 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c0771061be61,42457,1731253957887' ***** 2024-11-10T15:52:40,235 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T15:52:40,236 INFO [RS:1;c0771061be61:36515 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T15:52:40,236 INFO [RS:0;c0771061be61:37143 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T15:52:40,236 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T15:52:40,236 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T15:52:40,236 INFO [RS:1;c0771061be61:36515 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T15:52:40,236 INFO [RS:0;c0771061be61:37143 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-11-10T15:52:40,236 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T15:52:40,236 INFO [RS:1;c0771061be61:36515 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-10T15:52:40,236 INFO [RS:0;c0771061be61:37143 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-10T15:52:40,236 INFO [RS:1;c0771061be61:36515 {}] regionserver.HRegionServer(3091): Received CLOSE for af16ac139b7974885b61ac966114c059 2024-11-10T15:52:40,236 INFO [RS:2;c0771061be61:42457 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T15:52:40,236 INFO [RS:0;c0771061be61:37143 {}] regionserver.HRegionServer(959): stopping server c0771061be61,37143,1731253957804 2024-11-10T15:52:40,236 INFO [RS:0;c0771061be61:37143 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T15:52:40,237 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T15:52:40,237 INFO [RS:0;c0771061be61:37143 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c0771061be61:37143. 2024-11-10T15:52:40,237 INFO [RS:1;c0771061be61:36515 {}] regionserver.HRegionServer(959): stopping server c0771061be61,36515,1731253957845 2024-11-10T15:52:40,237 INFO [RS:1;c0771061be61:36515 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T15:52:40,237 DEBUG [RS:0;c0771061be61:37143 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T15:52:40,237 INFO [RS:2;c0771061be61:42457 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T15:52:40,237 DEBUG [RS:0;c0771061be61:37143 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:52:40,237 INFO [RS:1;c0771061be61:36515 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;c0771061be61:36515. 2024-11-10T15:52:40,237 INFO [RS:2;c0771061be61:42457 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-10T15:52:40,237 INFO [RS:2;c0771061be61:42457 {}] regionserver.HRegionServer(959): stopping server c0771061be61,42457,1731253957887 2024-11-10T15:52:40,237 INFO [RS:0;c0771061be61:37143 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T15:52:40,237 DEBUG [RS:1;c0771061be61:36515 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T15:52:40,237 INFO [RS:2;c0771061be61:42457 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T15:52:40,237 INFO [RS:0;c0771061be61:37143 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T15:52:40,238 DEBUG [RS:1;c0771061be61:36515 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:52:40,238 INFO [RS:2;c0771061be61:42457 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;c0771061be61:42457. 2024-11-10T15:52:40,238 INFO [RS:0;c0771061be61:37143 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-10T15:52:40,238 INFO [RS:1;c0771061be61:36515 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-10T15:52:40,238 DEBUG [RS:1;c0771061be61:36515 {}] regionserver.HRegionServer(1325): Online Regions={af16ac139b7974885b61ac966114c059=TestHBaseWalOnEC,,1731253959199.af16ac139b7974885b61ac966114c059.} 2024-11-10T15:52:40,238 INFO [RS:0;c0771061be61:37143 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-10T15:52:40,238 DEBUG [RS:2;c0771061be61:42457 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T15:52:40,238 DEBUG [RS:1;c0771061be61:36515 {}] regionserver.HRegionServer(1351): Waiting on af16ac139b7974885b61ac966114c059 2024-11-10T15:52:40,238 DEBUG [RS:2;c0771061be61:42457 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:52:40,238 INFO [RS:2;c0771061be61:42457 {}] regionserver.HRegionServer(976): stopping server c0771061be61,42457,1731253957887; all regions closed. 2024-11-10T15:52:40,238 DEBUG [RS_CLOSE_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing af16ac139b7974885b61ac966114c059, disabling compactions & flushes 2024-11-10T15:52:40,238 INFO [RS:0;c0771061be61:37143 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-10T15:52:40,238 INFO [RS_CLOSE_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731253959199.af16ac139b7974885b61ac966114c059. 2024-11-10T15:52:40,238 DEBUG [RS:0;c0771061be61:37143 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-10T15:52:40,238 DEBUG [RS_CLOSE_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731253959199.af16ac139b7974885b61ac966114c059. 2024-11-10T15:52:40,239 DEBUG [RS:0;c0771061be61:37143 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-10T15:52:40,239 DEBUG [RS_CLOSE_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731253959199.af16ac139b7974885b61ac966114c059. 
after waiting 0 ms 2024-11-10T15:52:40,239 DEBUG [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T15:52:40,239 DEBUG [RS_CLOSE_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731253959199.af16ac139b7974885b61ac966114c059. 2024-11-10T15:52:40,239 INFO [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T15:52:40,239 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:52:40,239 DEBUG [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T15:52:40,239 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:52:40,239 DEBUG [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T15:52:40,239 DEBUG [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T15:52:40,239 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:52:40,239 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:52:40,239 INFO [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-10T15:52:40,239 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:52:40,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42219 is added to blk_1073741835_1011 (size=93) 2024-11-10T15:52:40,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741835_1011 (size=93) 2024-11-10T15:52:40,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42771 is added to blk_1073741835_1011 (size=93) 2024-11-10T15:52:40,246 DEBUG [RS:2;c0771061be61:42457 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/oldWALs 2024-11-10T15:52:40,246 INFO [RS:2;c0771061be61:42457 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c0771061be61%2C42457%2C1731253957887:(num 1731253958839) 2024-11-10T15:52:40,246 DEBUG [RS:2;c0771061be61:42457 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:52:40,246 INFO [RS:2;c0771061be61:42457 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T15:52:40,246 INFO [RS:2;c0771061be61:42457 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T15:52:40,246 DEBUG [RS_CLOSE_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/default/TestHBaseWalOnEC/af16ac139b7974885b61ac966114c059/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-10T15:52:40,246 INFO [RS:2;c0771061be61:42457 {}] hbase.ChoreService(370): Chore service for: regionserver/c0771061be61:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, 
ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-10T15:52:40,246 INFO [RS:2;c0771061be61:42457 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T15:52:40,246 INFO [regionserver/c0771061be61:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T15:52:40,246 INFO [RS:2;c0771061be61:42457 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T15:52:40,247 INFO [RS:2;c0771061be61:42457 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-10T15:52:40,247 INFO [RS:2;c0771061be61:42457 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T15:52:40,247 INFO [RS:2;c0771061be61:42457 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42457 2024-11-10T15:52:40,247 INFO [RS_CLOSE_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731253959199.af16ac139b7974885b61ac966114c059. 2024-11-10T15:52:40,247 DEBUG [RS_CLOSE_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for af16ac139b7974885b61ac966114c059: Waiting for close lock at 1731253960238Running coprocessor pre-close hooks at 1731253960238Disabling compacts and flushes for region at 1731253960238Disabling writes for close at 1731253960239 (+1 ms)Writing region close event to WAL at 1731253960240 (+1 ms)Running coprocessor post-close hooks at 1731253960247 (+7 ms)Closed at 1731253960247 2024-11-10T15:52:40,247 DEBUG [RS_CLOSE_REGION-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731253959199.af16ac139b7974885b61ac966114c059. 
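The "Region close journal" entry above packs the close phases together: each phase ends with "at <epoch millis>" and, after the first, an optional "(+N ms)" delta from the previous phase. A throwaway sketch for splitting such a journal string back into one phase per line follows; it is plain Java, not HBase code, and the sample string is a subset of the journal logged above.

// Throwaway helper (not HBase code) that splits a run-together region close
// journal into one phase per line: "<epoch millis>  <phase> [(+N ms)]".
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class CloseJournalSplitter {
  public static void main(String[] args) {
    // Subset of the journal logged above for af16ac139b7974885b61ac966114c059.
    String journal = "Waiting for close lock at 1731253960238"
        + "Running coprocessor pre-close hooks at 1731253960238"
        + "Disabling writes for close at 1731253960239 (+1 ms)"
        + "Running coprocessor post-close hooks at 1731253960247 (+7 ms)"
        + "Closed at 1731253960247";
    // Each phase: free text, then " at " plus a 13-digit millisecond timestamp,
    // then an optional " (+N ms)" delta.
    Pattern phase = Pattern.compile("(.+?) at (\\d{13})( \\(\\+\\d+ ms\\))?");
    Matcher m = phase.matcher(journal);
    while (m.find()) {
      System.out.println(m.group(2) + "  " + m.group(1).trim()
          + (m.group(3) == null ? "" : m.group(3)));
    }
  }
}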
2024-11-10T15:52:40,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T15:52:40,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42457-0x1012572ef800003, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c0771061be61,42457,1731253957887 2024-11-10T15:52:40,258 INFO [RS:2;c0771061be61:42457 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T15:52:40,259 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c0771061be61,42457,1731253957887] 2024-11-10T15:52:40,259 DEBUG [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/hbase/meta/1588230740/.tmp/info/ded464e6e50e4946909064c34d425bbb is 153, key is TestHBaseWalOnEC,,1731253959199.af16ac139b7974885b61ac966114c059./info:regioninfo/1731253959584/Put/seqid=0 2024-11-10T15:52:40,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42771 is added to blk_1073741840_1016 (size=6637) 2024-11-10T15:52:40,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42219 is added to blk_1073741840_1016 (size=6637) 2024-11-10T15:52:40,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741840_1016 (size=6637) 2024-11-10T15:52:40,267 INFO [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/hbase/meta/1588230740/.tmp/info/ded464e6e50e4946909064c34d425bbb 2024-11-10T15:52:40,279 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c0771061be61,42457,1731253957887 already deleted, retry=false 2024-11-10T15:52:40,279 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c0771061be61,42457,1731253957887 expired; onlineServers=2 2024-11-10T15:52:40,291 DEBUG [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/hbase/meta/1588230740/.tmp/ns/52ccdf77bd9940cc9b04ded7d39732bc is 43, key is default/ns:d/1731253959125/Put/seqid=0 2024-11-10T15:52:40,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42771 is added to blk_1073741841_1017 (size=5153) 2024-11-10T15:52:40,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741841_1017 (size=5153) 2024-11-10T15:52:40,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42219 is added to blk_1073741841_1017 (size=5153) 2024-11-10T15:52:40,299 INFO [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data 
size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/hbase/meta/1588230740/.tmp/ns/52ccdf77bd9940cc9b04ded7d39732bc 2024-11-10T15:52:40,300 INFO [regionserver/c0771061be61:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T15:52:40,301 INFO [regionserver/c0771061be61:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T15:52:40,305 INFO [regionserver/c0771061be61:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T15:52:40,322 DEBUG [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/hbase/meta/1588230740/.tmp/table/7ebe089a41d0430eb664678e49e5c34f is 52, key is TestHBaseWalOnEC/table:state/1731253959599/Put/seqid=0 2024-11-10T15:52:40,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741842_1018 (size=5249) 2024-11-10T15:52:40,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42219 is added to blk_1073741842_1018 (size=5249) 2024-11-10T15:52:40,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42771 is added to blk_1073741842_1018 (size=5249) 2024-11-10T15:52:40,330 INFO [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/hbase/meta/1588230740/.tmp/table/7ebe089a41d0430eb664678e49e5c34f 2024-11-10T15:52:40,338 DEBUG [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/hbase/meta/1588230740/.tmp/info/ded464e6e50e4946909064c34d425bbb as hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/hbase/meta/1588230740/info/ded464e6e50e4946909064c34d425bbb 2024-11-10T15:52:40,346 INFO [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/hbase/meta/1588230740/info/ded464e6e50e4946909064c34d425bbb, entries=10, sequenceid=11, filesize=6.5 K 2024-11-10T15:52:40,348 DEBUG [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/hbase/meta/1588230740/.tmp/ns/52ccdf77bd9940cc9b04ded7d39732bc as hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/hbase/meta/1588230740/ns/52ccdf77bd9940cc9b04ded7d39732bc 2024-11-10T15:52:40,355 INFO [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/hbase/meta/1588230740/ns/52ccdf77bd9940cc9b04ded7d39732bc, entries=2, sequenceid=11, filesize=5.0 K 2024-11-10T15:52:40,356 DEBUG [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/hbase/meta/1588230740/.tmp/table/7ebe089a41d0430eb664678e49e5c34f as hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/hbase/meta/1588230740/table/7ebe089a41d0430eb664678e49e5c34f 2024-11-10T15:52:40,364 INFO [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/hbase/meta/1588230740/table/7ebe089a41d0430eb664678e49e5c34f, entries=2, sequenceid=11, filesize=5.1 K 2024-11-10T15:52:40,365 INFO [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 126ms, sequenceid=11, compaction requested=false 2024-11-10T15:52:40,369 INFO [RS:2;c0771061be61:42457 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T15:52:40,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42457-0x1012572ef800003, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:52:40,369 INFO [RS:2;c0771061be61:42457 {}] regionserver.HRegionServer(1031): Exiting; stopping=c0771061be61,42457,1731253957887; zookeeper connection closed. 2024-11-10T15:52:40,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42457-0x1012572ef800003, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:52:40,369 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@c7b5429 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@c7b5429 2024-11-10T15:52:40,370 DEBUG [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-10T15:52:40,371 DEBUG [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T15:52:40,371 INFO [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T15:52:40,371 DEBUG [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731253960239Running coprocessor pre-close hooks at 1731253960239Disabling compacts and flushes for region at 1731253960239Disabling writes for close at 1731253960239Obtaining lock to block concurrent updates at 1731253960239Preparing flush snapshotting stores in 1588230740 at 1731253960239Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731253960240 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731253960241 (+1 ms)Flushing 1588230740/info: creating writer at 1731253960241Flushing 1588230740/info: appending metadata at 1731253960259 (+18 ms)Flushing 1588230740/info: closing flushed 
file at 1731253960259Flushing 1588230740/ns: creating writer at 1731253960275 (+16 ms)Flushing 1588230740/ns: appending metadata at 1731253960291 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731253960291Flushing 1588230740/table: creating writer at 1731253960307 (+16 ms)Flushing 1588230740/table: appending metadata at 1731253960322 (+15 ms)Flushing 1588230740/table: closing flushed file at 1731253960322Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7014be12: reopening flushed file at 1731253960337 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3cb84fd4: reopening flushed file at 1731253960346 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6c661343: reopening flushed file at 1731253960355 (+9 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 126ms, sequenceid=11, compaction requested=false at 1731253960365 (+10 ms)Writing region close event to WAL at 1731253960366 (+1 ms)Running coprocessor post-close hooks at 1731253960371 (+5 ms)Closed at 1731253960371 2024-11-10T15:52:40,371 DEBUG [RS_CLOSE_META-regionserver/c0771061be61:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-10T15:52:40,438 INFO [RS:1;c0771061be61:36515 {}] regionserver.HRegionServer(976): stopping server c0771061be61,36515,1731253957845; all regions closed. 2024-11-10T15:52:40,439 INFO [RS:0;c0771061be61:37143 {}] regionserver.HRegionServer(976): stopping server c0771061be61,37143,1731253957804; all regions closed. 2024-11-10T15:52:40,439 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:52:40,439 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:52:40,439 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:52:40,439 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:52:40,439 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:52:40,439 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:52:40,439 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:52:40,439 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:52:40,440 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:52:40,440 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:52:40,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42771 is added to blk_1073741833_1009 (size=1298) 2024-11-10T15:52:40,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741836_1012 (size=2751) 2024-11-10T15:52:40,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42219 is added to blk_1073741836_1012 (size=2751) 2024-11-10T15:52:40,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42771 is added to blk_1073741836_1012 (size=2751) 2024-11-10T15:52:40,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741833_1009 (size=1298) 2024-11-10T15:52:40,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42219 is added to blk_1073741833_1009 (size=1298) 2024-11-10T15:52:40,446 DEBUG [RS:1;c0771061be61:36515 {}] 
wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/oldWALs 2024-11-10T15:52:40,446 INFO [RS:1;c0771061be61:36515 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c0771061be61%2C36515%2C1731253957845:(num 1731253958835) 2024-11-10T15:52:40,446 DEBUG [RS:0;c0771061be61:37143 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/oldWALs 2024-11-10T15:52:40,446 INFO [RS:0;c0771061be61:37143 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c0771061be61%2C37143%2C1731253957804.meta:.meta(num 1731253959062) 2024-11-10T15:52:40,446 DEBUG [RS:1;c0771061be61:36515 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:52:40,446 INFO [RS:1;c0771061be61:36515 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T15:52:40,446 INFO [RS:1;c0771061be61:36515 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T15:52:40,446 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:52:40,446 INFO [RS:1;c0771061be61:36515 {}] hbase.ChoreService(370): Chore service for: regionserver/c0771061be61:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-10T15:52:40,447 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:52:40,447 INFO [RS:1;c0771061be61:36515 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T15:52:40,447 INFO [regionserver/c0771061be61:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T15:52:40,447 INFO [RS:1;c0771061be61:36515 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T15:52:40,447 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:52:40,447 INFO [RS:1;c0771061be61:36515 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-10T15:52:40,447 INFO [RS:1;c0771061be61:36515 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T15:52:40,447 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:52:40,447 INFO [RS:1;c0771061be61:36515 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:36515 2024-11-10T15:52:40,447 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:52:40,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42219 is added to blk_1073741834_1010 (size=93) 2024-11-10T15:52:40,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741834_1010 (size=93) 2024-11-10T15:52:40,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42771 is added to blk_1073741834_1010 (size=93) 2024-11-10T15:52:40,453 DEBUG [RS:0;c0771061be61:37143 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/oldWALs 2024-11-10T15:52:40,453 INFO [RS:0;c0771061be61:37143 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c0771061be61%2C37143%2C1731253957804:(num 1731253958835) 2024-11-10T15:52:40,453 DEBUG [RS:0;c0771061be61:37143 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T15:52:40,453 INFO [RS:0;c0771061be61:37143 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T15:52:40,453 INFO [RS:0;c0771061be61:37143 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T15:52:40,453 INFO [RS:0;c0771061be61:37143 {}] hbase.ChoreService(370): Chore service for: regionserver/c0771061be61:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-10T15:52:40,454 INFO [RS:0;c0771061be61:37143 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T15:52:40,454 INFO [regionserver/c0771061be61:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-10T15:52:40,454 INFO [RS:0;c0771061be61:37143 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:37143 2024-11-10T15:52:40,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T15:52:40,458 INFO [RS:1;c0771061be61:36515 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T15:52:40,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36515-0x1012572ef800002, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c0771061be61,36515,1731253957845 2024-11-10T15:52:40,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37143-0x1012572ef800001, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c0771061be61,37143,1731253957804 2024-11-10T15:52:40,469 INFO [RS:0;c0771061be61:37143 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T15:52:40,479 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c0771061be61,37143,1731253957804] 2024-11-10T15:52:40,500 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c0771061be61,37143,1731253957804 already deleted, retry=false 2024-11-10T15:52:40,500 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c0771061be61,37143,1731253957804 expired; onlineServers=1 2024-11-10T15:52:40,501 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c0771061be61,36515,1731253957845] 2024-11-10T15:52:40,511 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c0771061be61,36515,1731253957845 already deleted, retry=false 2024-11-10T15:52:40,511 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c0771061be61,36515,1731253957845 expired; onlineServers=0 2024-11-10T15:52:40,511 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c0771061be61,38919,1731253957610' ***** 2024-11-10T15:52:40,511 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-10T15:52:40,511 INFO [M:0;c0771061be61:38919 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T15:52:40,511 INFO [M:0;c0771061be61:38919 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T15:52:40,511 DEBUG [M:0;c0771061be61:38919 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-10T15:52:40,512 DEBUG [M:0;c0771061be61:38919 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-10T15:52:40,512 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-10T15:52:40,512 DEBUG [master/c0771061be61:0:becomeActiveMaster-HFileCleaner.small.0-1731253958401 {}] cleaner.HFileCleaner(306): Exit Thread[master/c0771061be61:0:becomeActiveMaster-HFileCleaner.small.0-1731253958401,5,FailOnTimeoutGroup] 2024-11-10T15:52:40,512 DEBUG [master/c0771061be61:0:becomeActiveMaster-HFileCleaner.large.0-1731253958401 {}] cleaner.HFileCleaner(306): Exit Thread[master/c0771061be61:0:becomeActiveMaster-HFileCleaner.large.0-1731253958401,5,FailOnTimeoutGroup] 2024-11-10T15:52:40,512 INFO [M:0;c0771061be61:38919 {}] hbase.ChoreService(370): Chore service for: master/c0771061be61:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-10T15:52:40,512 INFO [M:0;c0771061be61:38919 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T15:52:40,512 DEBUG [M:0;c0771061be61:38919 {}] master.HMaster(1795): Stopping service threads 2024-11-10T15:52:40,512 INFO [M:0;c0771061be61:38919 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-10T15:52:40,512 INFO [M:0;c0771061be61:38919 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T15:52:40,512 INFO [M:0;c0771061be61:38919 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-10T15:52:40,512 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-10T15:52:40,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-10T15:52:40,521 DEBUG [M:0;c0771061be61:38919 {}] zookeeper.ZKUtil(347): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-10T15:52:40,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T15:52:40,522 WARN [M:0;c0771061be61:38919 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-10T15:52:40,522 INFO [M:0;c0771061be61:38919 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/.lastflushedseqids 2024-11-10T15:52:40,525 WARN [IPC Server handler 4 on default port 40651 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-10T15:52:40,525 WARN [IPC Server handler 4 on default port 40651 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-10T15:52:40,525 WARN [IPC Server handler 4 on default 
port 40651 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-10T15:52:40,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42771 is added to blk_1073741843_1019 (size=127) 2024-11-10T15:52:40,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741843_1019 (size=127) 2024-11-10T15:52:40,530 INFO [M:0;c0771061be61:38919 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-10T15:52:40,530 INFO [M:0;c0771061be61:38919 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-10T15:52:40,530 DEBUG [M:0;c0771061be61:38919 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T15:52:40,530 INFO [M:0;c0771061be61:38919 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T15:52:40,530 DEBUG [M:0;c0771061be61:38919 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T15:52:40,530 DEBUG [M:0;c0771061be61:38919 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T15:52:40,530 DEBUG [M:0;c0771061be61:38919 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-10T15:52:40,530 INFO [M:0;c0771061be61:38919 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-11-10T15:52:40,547 DEBUG [M:0;c0771061be61:38919 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ea62c35ce1d94d48a544b4b37da4d12f is 82, key is hbase:meta,,1/info:regioninfo/1731253959098/Put/seqid=0 2024-11-10T15:52:40,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741844_1020 (size=5672) 2024-11-10T15:52:40,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42771 is added to blk_1073741844_1020 (size=5672) 2024-11-10T15:52:40,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42219 is added to blk_1073741844_1020 (size=5672) 2024-11-10T15:52:40,555 INFO [M:0;c0771061be61:38919 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ea62c35ce1d94d48a544b4b37da4d12f 2024-11-10T15:52:40,579 DEBUG [M:0;c0771061be61:38919 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e12109ae1d09456697efc5ae10673253 is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731253959606/Put/seqid=0 2024-11-10T15:52:40,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36515-0x1012572ef800002, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:52:40,579 INFO [RS:1;c0771061be61:36515 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T15:52:40,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36515-0x1012572ef800002, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:52:40,580 INFO [RS:1;c0771061be61:36515 {}] regionserver.HRegionServer(1031): Exiting; stopping=c0771061be61,36515,1731253957845; zookeeper connection closed. 
2024-11-10T15:52:40,580 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2c0498f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2c0498f 2024-11-10T15:52:40,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42771 is added to blk_1073741845_1021 (size=6440) 2024-11-10T15:52:40,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42219 is added to blk_1073741845_1021 (size=6440) 2024-11-10T15:52:40,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741845_1021 (size=6440) 2024-11-10T15:52:40,587 INFO [M:0;c0771061be61:38919 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.16 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e12109ae1d09456697efc5ae10673253 2024-11-10T15:52:40,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37143-0x1012572ef800001, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:52:40,590 INFO [RS:0;c0771061be61:37143 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T15:52:40,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37143-0x1012572ef800001, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:52:40,590 INFO [RS:0;c0771061be61:37143 {}] regionserver.HRegionServer(1031): Exiting; stopping=c0771061be61,37143,1731253957804; zookeeper connection closed. 
2024-11-10T15:52:40,591 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@125143a4 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@125143a4 2024-11-10T15:52:40,591 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-10T15:52:40,608 DEBUG [M:0;c0771061be61:38919 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/955ff41ee6384259ba17cc9d687654d9 is 69, key is c0771061be61,36515,1731253957845/rs:state/1731253958630/Put/seqid=0 2024-11-10T15:52:40,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42219 is added to blk_1073741846_1022 (size=5294) 2024-11-10T15:52:40,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42771 is added to blk_1073741846_1022 (size=5294) 2024-11-10T15:52:40,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741846_1022 (size=5294) 2024-11-10T15:52:40,618 INFO [M:0;c0771061be61:38919 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/955ff41ee6384259ba17cc9d687654d9 2024-11-10T15:52:40,626 DEBUG [M:0;c0771061be61:38919 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ea62c35ce1d94d48a544b4b37da4d12f as hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ea62c35ce1d94d48a544b4b37da4d12f 2024-11-10T15:52:40,633 INFO [M:0;c0771061be61:38919 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ea62c35ce1d94d48a544b4b37da4d12f, entries=8, sequenceid=72, filesize=5.5 K 2024-11-10T15:52:40,634 DEBUG [M:0;c0771061be61:38919 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e12109ae1d09456697efc5ae10673253 as hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e12109ae1d09456697efc5ae10673253 2024-11-10T15:52:40,641 INFO [M:0;c0771061be61:38919 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e12109ae1d09456697efc5ae10673253, entries=8, sequenceid=72, filesize=6.3 K 2024-11-10T15:52:40,643 DEBUG [M:0;c0771061be61:38919 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/955ff41ee6384259ba17cc9d687654d9 as 
hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/955ff41ee6384259ba17cc9d687654d9 2024-11-10T15:52:40,650 INFO [M:0;c0771061be61:38919 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40651/user/jenkins/test-data/2ef80080-29a9-51a5-cb14-e1fae0724234/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/955ff41ee6384259ba17cc9d687654d9, entries=3, sequenceid=72, filesize=5.2 K 2024-11-10T15:52:40,652 INFO [M:0;c0771061be61:38919 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 122ms, sequenceid=72, compaction requested=false 2024-11-10T15:52:40,653 INFO [M:0;c0771061be61:38919 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T15:52:40,654 DEBUG [M:0;c0771061be61:38919 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731253960530Disabling compacts and flushes for region at 1731253960530Disabling writes for close at 1731253960530Obtaining lock to block concurrent updates at 1731253960530Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731253960530Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27483, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1731253960531 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731253960532 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731253960532Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731253960547 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731253960547Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731253960562 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731253960578 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731253960578Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731253960593 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731253960608 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731253960608Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a9bcfe4: reopening flushed file at 1731253960625 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5d127598: reopening flushed file at 1731253960633 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34b56e03: reopening flushed file at 1731253960642 (+9 ms)Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 122ms, sequenceid=72, compaction requested=false at 1731253960652 (+10 ms)Writing region close event to WAL at 1731253960653 (+1 ms)Closed at 1731253960653 2024-11-10T15:52:40,654 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:52:40,654 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:52:40,654 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:52:40,654 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:52:40,654 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T15:52:40,657 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42219 is added to blk_1073741830_1006 (size=32686) 2024-11-10T15:52:40,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42771 is added to blk_1073741830_1006 (size=32686) 2024-11-10T15:52:40,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741830_1006 (size=32686) 2024-11-10T15:52:40,659 INFO [M:0;c0771061be61:38919 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-10T15:52:40,659 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T15:52:40,659 INFO [M:0;c0771061be61:38919 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38919 2024-11-10T15:52:40,659 INFO [M:0;c0771061be61:38919 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T15:52:40,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:52:40,769 INFO [M:0;c0771061be61:38919 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T15:52:40,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38919-0x1012572ef800000, quorum=127.0.0.1:60328, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T15:52:40,771 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@733029a3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T15:52:40,772 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a87cd5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T15:52:40,772 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T15:52:40,772 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60d8940e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T15:52:40,772 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@39179133{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/hadoop.log.dir/,STOPPED} 2024-11-10T15:52:40,773 WARN [BP-1914055564-172.17.0.3-1731253955093 heartbeating to localhost/127.0.0.1:40651 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T15:52:40,773 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T15:52:40,773 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T15:52:40,773 WARN [BP-1914055564-172.17.0.3-1731253955093 heartbeating to localhost/127.0.0.1:40651 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1914055564-172.17.0.3-1731253955093 (Datanode Uuid 51ac6d60-b8a8-47c0-bcd0-ee3171ffdb23) service to localhost/127.0.0.1:40651 2024-11-10T15:52:40,774 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/cluster_adee98e5-b000-d252-9734-75e0ccdd3b5f/data/data5/current/BP-1914055564-172.17.0.3-1731253955093 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T15:52:40,774 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/cluster_adee98e5-b000-d252-9734-75e0ccdd3b5f/data/data6/current/BP-1914055564-172.17.0.3-1731253955093 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T15:52:40,774 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T15:52:40,776 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4be50faa{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T15:52:40,776 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6625a4f2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T15:52:40,776 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T15:52:40,777 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f5c60f4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T15:52:40,777 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7febc9c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/hadoop.log.dir/,STOPPED} 2024-11-10T15:52:40,778 WARN [BP-1914055564-172.17.0.3-1731253955093 heartbeating to localhost/127.0.0.1:40651 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T15:52:40,778 WARN [BP-1914055564-172.17.0.3-1731253955093 heartbeating to localhost/127.0.0.1:40651 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1914055564-172.17.0.3-1731253955093 (Datanode Uuid 1f7cedca-187b-4c63-b483-fad773c07f46) service to localhost/127.0.0.1:40651 2024-11-10T15:52:40,778 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T15:52:40,778 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-10T15:52:40,778 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/cluster_adee98e5-b000-d252-9734-75e0ccdd3b5f/data/data3/current/BP-1914055564-172.17.0.3-1731253955093 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-10T15:52:40,779 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/cluster_adee98e5-b000-d252-9734-75e0ccdd3b5f/data/data4/current/BP-1914055564-172.17.0.3-1731253955093 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-10T15:52:40,779 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-10T15:52:40,782 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@765c7210{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-10T15:52:40,782 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3e498d5c{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-10T15:52:40,782 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-10T15:52:40,783 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46f2e60d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-10T15:52:40,783 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6decf963{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/hadoop.log.dir/,STOPPED}
2024-11-10T15:52:40,784 WARN [BP-1914055564-172.17.0.3-1731253955093 heartbeating to localhost/127.0.0.1:40651 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-10T15:52:40,784 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-10T15:52:40,784 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-10T15:52:40,784 WARN [BP-1914055564-172.17.0.3-1731253955093 heartbeating to localhost/127.0.0.1:40651 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1914055564-172.17.0.3-1731253955093 (Datanode Uuid 01a9fb75-a036-4f00-beb5-d6b9a96e77ef) service to localhost/127.0.0.1:40651
2024-11-10T15:52:40,784 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/cluster_adee98e5-b000-d252-9734-75e0ccdd3b5f/data/data1/current/BP-1914055564-172.17.0.3-1731253955093 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-10T15:52:40,784 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/cluster_adee98e5-b000-d252-9734-75e0ccdd3b5f/data/data2/current/BP-1914055564-172.17.0.3-1731253955093 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-10T15:52:40,785 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-10T15:52:40,789 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@563c957f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-10T15:52:40,790 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@cbd9f23{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-10T15:52:40,790 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-10T15:52:40,790 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59bbe271{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-10T15:52:40,790 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1aa34083{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f0f86261-819a-ac1e-853e-7d8576d38c55/hadoop.log.dir/,STOPPED}
2024-11-10T15:52:40,797 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-10T15:52:40,821 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-10T15:52:40,827 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=153 (was 91) - Thread LEAK? -, OpenFileDescriptor=516 (was 445) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=133 (was 102) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7079 (was 7220)