2024-11-08 19:49:53,280 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-11-08 19:49:53,297 main DEBUG Took 0.014211 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-08 19:49:53,297 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-08 19:49:53,298 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-08 19:49:53,299 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-08 19:49:53,301 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 19:49:53,323 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-08 19:49:53,341 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 19:49:53,343 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 19:49:53,344 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 19:49:53,345 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 19:49:53,346 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 19:49:53,346 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 19:49:53,348 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 19:49:53,348 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 19:49:53,349 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 19:49:53,350 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 19:49:53,351 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 19:49:53,352 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 19:49:53,353 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 19:49:53,353 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-08 19:49:53,354 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 19:49:53,354 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 19:49:53,355 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 19:49:53,355 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 19:49:53,356 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 19:49:53,356 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 19:49:53,357 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 19:49:53,358 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 19:49:53,358 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 19:49:53,359 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 19:49:53,360 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 19:49:53,360 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-08 19:49:53,362 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 19:49:53,364 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-08 19:49:53,367 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-08 19:49:53,367 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-08 19:49:53,369 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-08 19:49:53,370 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-08 19:49:53,382 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-08 19:49:53,386 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-08 19:49:53,389 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-08 19:49:53,389 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-08 19:49:53,390 main DEBUG createAppenders(={Console}) 2024-11-08 19:49:53,391 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-11-08 19:49:53,391 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-11-08 19:49:53,392 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-11-08 19:49:53,393 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-08 19:49:53,393 main DEBUG OutputStream closed 2024-11-08 19:49:53,393 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-08 19:49:53,394 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-08 19:49:53,394 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-11-08 19:49:53,493 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-08 19:49:53,496 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-08 19:49:53,498 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-08 19:49:53,499 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-08 19:49:53,500 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-08 19:49:53,501 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-08 19:49:53,501 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-08 19:49:53,502 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-08 19:49:53,502 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-08 19:49:53,503 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-08 19:49:53,504 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-08 19:49:53,504 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-08 19:49:53,505 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-08 19:49:53,505 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-08 19:49:53,505 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-08 19:49:53,506 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-08 19:49:53,506 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-08 19:49:53,507 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-08 19:49:53,511 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-08 19:49:53,511 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-11-08 19:49:53,511 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-08 19:49:53,513 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-11-08T19:49:53,534 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-11-08 19:49:53,537 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-08 19:49:53,538 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-08T19:49:53,921 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a 2024-11-08T19:49:53,958 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/cluster_016ada73-ccc6-e170-d67f-7b5f9a0dfd5d, deleteOnExit=true 2024-11-08T19:49:53,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/test.cache.data in system properties and HBase conf 2024-11-08T19:49:53,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/hadoop.tmp.dir in system properties and HBase conf 2024-11-08T19:49:53,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/hadoop.log.dir in system properties and HBase conf 2024-11-08T19:49:53,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-08T19:49:53,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-08T19:49:53,965 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-08T19:49:54,096 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-08T19:49:54,228 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-08T19:49:54,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-08T19:49:54,235 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-08T19:49:54,236 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-08T19:49:54,236 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-08T19:49:54,237 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-08T19:49:54,238 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-08T19:49:54,239 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-08T19:49:54,240 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-08T19:49:54,240 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-08T19:49:54,241 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/nfs.dump.dir in system properties and HBase conf 2024-11-08T19:49:54,242 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/java.io.tmpdir in system properties and HBase conf 2024-11-08T19:49:54,242 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-08T19:49:54,243 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-08T19:49:54,243 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-08T19:49:55,761 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-08T19:49:55,852 INFO [Time-limited test {}] log.Log(170): Logging initialized @3469ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-08T19:49:55,936 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T19:49:56,027 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T19:49:56,094 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T19:49:56,094 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T19:49:56,096 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T19:49:56,114 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T19:49:56,118 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@760c69c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/hadoop.log.dir/,AVAILABLE} 2024-11-08T19:49:56,119 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ce709a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T19:49:56,355 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62d6efd9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/java.io.tmpdir/jetty-localhost-36623-hadoop-hdfs-3_4_1-tests_jar-_-any-3914067588561131696/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-08T19:49:56,363 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@353d35a1{HTTP/1.1, (http/1.1)}{localhost:36623} 2024-11-08T19:49:56,364 INFO [Time-limited test {}] server.Server(415): Started @3982ms 2024-11-08T19:49:57,335 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T19:49:57,344 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T19:49:57,346 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T19:49:57,346 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T19:49:57,346 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-08T19:49:57,347 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a5de9e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/hadoop.log.dir/,AVAILABLE} 2024-11-08T19:49:57,348 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69893329{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T19:49:57,484 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1b97a472{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/java.io.tmpdir/jetty-localhost-39611-hadoop-hdfs-3_4_1-tests_jar-_-any-11418942537441711549/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T19:49:57,485 
INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3722a29b{HTTP/1.1, (http/1.1)}{localhost:39611} 2024-11-08T19:49:57,485 INFO [Time-limited test {}] server.Server(415): Started @5103ms 2024-11-08T19:49:57,537 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T19:49:57,677 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T19:49:57,687 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T19:49:57,695 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T19:49:57,695 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T19:49:57,696 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T19:49:57,697 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@510fec09{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/hadoop.log.dir/,AVAILABLE} 2024-11-08T19:49:57,698 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40eb7053{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T19:49:57,846 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@353955e9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/java.io.tmpdir/jetty-localhost-40329-hadoop-hdfs-3_4_1-tests_jar-_-any-9612925649058300643/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T19:49:57,847 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11738cd8{HTTP/1.1, (http/1.1)}{localhost:40329} 2024-11-08T19:49:57,847 INFO [Time-limited test {}] server.Server(415): Started @5465ms 2024-11-08T19:49:57,850 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T19:49:57,939 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T19:49:57,946 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T19:49:57,950 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T19:49:57,950 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T19:49:57,951 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-08T19:49:57,952 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16cd567f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/hadoop.log.dir/,AVAILABLE} 2024-11-08T19:49:57,953 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5822645a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T19:49:58,098 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3114ae69{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/java.io.tmpdir/jetty-localhost-39591-hadoop-hdfs-3_4_1-tests_jar-_-any-3343444922214114083/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T19:49:58,099 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3c70a874{HTTP/1.1, (http/1.1)}{localhost:39591} 2024-11-08T19:49:58,099 INFO [Time-limited test {}] server.Server(415): Started @5717ms 2024-11-08T19:49:58,101 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-08T19:49:59,374 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/cluster_016ada73-ccc6-e170-d67f-7b5f9a0dfd5d/data/data3/current/BP-947710928-172.17.0.2-1731095395196/current, will proceed with Du for space computation calculation, 2024-11-08T19:49:59,374 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/cluster_016ada73-ccc6-e170-d67f-7b5f9a0dfd5d/data/data2/current/BP-947710928-172.17.0.2-1731095395196/current, will proceed with Du for space computation calculation, 2024-11-08T19:49:59,374 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/cluster_016ada73-ccc6-e170-d67f-7b5f9a0dfd5d/data/data1/current/BP-947710928-172.17.0.2-1731095395196/current, will proceed with Du for space computation calculation, 2024-11-08T19:49:59,374 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/cluster_016ada73-ccc6-e170-d67f-7b5f9a0dfd5d/data/data4/current/BP-947710928-172.17.0.2-1731095395196/current, will proceed with Du for space computation calculation, 2024-11-08T19:49:59,421 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-08T19:49:59,421 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-08T19:49:59,483 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8ace34f120a92d2e with lease ID 0x809976e21aba1a8a: Processing first storage report for DS-0c84927a-4568-4e73-89cf-73067e93d1e5 from datanode DatanodeRegistration(127.0.0.1:44041, datanodeUuid=3f3cb7cd-3701-4eda-b085-bd31987c54bb, infoPort=46831, infoSecurePort=0, ipcPort=36491, storageInfo=lv=-57;cid=testClusterID;nsid=914686892;c=1731095395196) 2024-11-08T19:49:59,485 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8ace34f120a92d2e with lease ID 0x809976e21aba1a8a: from storage DS-0c84927a-4568-4e73-89cf-73067e93d1e5 node DatanodeRegistration(127.0.0.1:44041, datanodeUuid=3f3cb7cd-3701-4eda-b085-bd31987c54bb, infoPort=46831, infoSecurePort=0, ipcPort=36491, storageInfo=lv=-57;cid=testClusterID;nsid=914686892;c=1731095395196), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-08T19:49:59,485 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4515ee922b89b6ff with lease ID 0x809976e21aba1a89: Processing first storage report for DS-f2d7659d-395a-4749-9f43-e1bfba72294e from datanode DatanodeRegistration(127.0.0.1:46163, datanodeUuid=3a5dc330-69cc-4643-b6df-4f5f1fc9b36c, infoPort=32819, infoSecurePort=0, ipcPort=34203, storageInfo=lv=-57;cid=testClusterID;nsid=914686892;c=1731095395196) 2024-11-08T19:49:59,486 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4515ee922b89b6ff with lease ID 0x809976e21aba1a89: from storage DS-f2d7659d-395a-4749-9f43-e1bfba72294e node DatanodeRegistration(127.0.0.1:46163, datanodeUuid=3a5dc330-69cc-4643-b6df-4f5f1fc9b36c, infoPort=32819, infoSecurePort=0, ipcPort=34203, storageInfo=lv=-57;cid=testClusterID;nsid=914686892;c=1731095395196), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T19:49:59,486 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8ace34f120a92d2e with lease ID 0x809976e21aba1a8a: Processing first storage report for DS-aef58d3a-ad58-4045-b287-49cf9c7c215c from datanode DatanodeRegistration(127.0.0.1:44041, datanodeUuid=3f3cb7cd-3701-4eda-b085-bd31987c54bb, infoPort=46831, infoSecurePort=0, ipcPort=36491, storageInfo=lv=-57;cid=testClusterID;nsid=914686892;c=1731095395196) 2024-11-08T19:49:59,486 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8ace34f120a92d2e with lease ID 0x809976e21aba1a8a: from storage DS-aef58d3a-ad58-4045-b287-49cf9c7c215c node DatanodeRegistration(127.0.0.1:44041, datanodeUuid=3f3cb7cd-3701-4eda-b085-bd31987c54bb, infoPort=46831, infoSecurePort=0, ipcPort=36491, storageInfo=lv=-57;cid=testClusterID;nsid=914686892;c=1731095395196), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-08T19:49:59,487 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4515ee922b89b6ff with lease ID 0x809976e21aba1a89: Processing first storage report for DS-c8c65a3d-937d-412f-b4a3-155ec51875ce from datanode DatanodeRegistration(127.0.0.1:46163, datanodeUuid=3a5dc330-69cc-4643-b6df-4f5f1fc9b36c, infoPort=32819, infoSecurePort=0, ipcPort=34203, storageInfo=lv=-57;cid=testClusterID;nsid=914686892;c=1731095395196) 2024-11-08T19:49:59,487 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x4515ee922b89b6ff with lease ID 0x809976e21aba1a89: from storage DS-c8c65a3d-937d-412f-b4a3-155ec51875ce node DatanodeRegistration(127.0.0.1:46163, datanodeUuid=3a5dc330-69cc-4643-b6df-4f5f1fc9b36c, infoPort=32819, infoSecurePort=0, ipcPort=34203, storageInfo=lv=-57;cid=testClusterID;nsid=914686892;c=1731095395196), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T19:49:59,495 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/cluster_016ada73-ccc6-e170-d67f-7b5f9a0dfd5d/data/data5/current/BP-947710928-172.17.0.2-1731095395196/current, will proceed with Du for space computation calculation, 2024-11-08T19:49:59,496 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/cluster_016ada73-ccc6-e170-d67f-7b5f9a0dfd5d/data/data6/current/BP-947710928-172.17.0.2-1731095395196/current, will proceed with Du for space computation calculation, 2024-11-08T19:49:59,521 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-08T19:49:59,529 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x555aab87e0dba51f with lease ID 0x809976e21aba1a8b: Processing first storage report for DS-91ae1e86-391f-42af-8bfa-692249e3172a from datanode DatanodeRegistration(127.0.0.1:35749, datanodeUuid=06f1f6c0-85e9-48f7-8cc1-69dc60077454, infoPort=43939, infoSecurePort=0, ipcPort=36231, storageInfo=lv=-57;cid=testClusterID;nsid=914686892;c=1731095395196) 2024-11-08T19:49:59,529 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x555aab87e0dba51f with lease ID 0x809976e21aba1a8b: from storage DS-91ae1e86-391f-42af-8bfa-692249e3172a node DatanodeRegistration(127.0.0.1:35749, datanodeUuid=06f1f6c0-85e9-48f7-8cc1-69dc60077454, infoPort=43939, infoSecurePort=0, ipcPort=36231, storageInfo=lv=-57;cid=testClusterID;nsid=914686892;c=1731095395196), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-08T19:49:59,529 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x555aab87e0dba51f with lease ID 0x809976e21aba1a8b: Processing first storage report for DS-ff4ff047-c3d3-4fd6-ab0b-dd287d3a545d from datanode DatanodeRegistration(127.0.0.1:35749, datanodeUuid=06f1f6c0-85e9-48f7-8cc1-69dc60077454, infoPort=43939, infoSecurePort=0, ipcPort=36231, storageInfo=lv=-57;cid=testClusterID;nsid=914686892;c=1731095395196) 2024-11-08T19:49:59,529 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x555aab87e0dba51f with lease ID 0x809976e21aba1a8b: from storage DS-ff4ff047-c3d3-4fd6-ab0b-dd287d3a545d node DatanodeRegistration(127.0.0.1:35749, datanodeUuid=06f1f6c0-85e9-48f7-8cc1-69dc60077454, infoPort=43939, infoSecurePort=0, ipcPort=36231, storageInfo=lv=-57;cid=testClusterID;nsid=914686892;c=1731095395196), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T19:49:59,555 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a
2024-11-08T19:49:59,630 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... using builtin-java codec where applicable
2024-11-08T19:49:59,705 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=162, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=109, ProcessCount=11, AvailableMemoryMB=3623
2024-11-08T19:49:59,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-08T19:49:59,718 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS
2024-11-08T19:49:59,840 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/cluster_016ada73-ccc6-e170-d67f-7b5f9a0dfd5d/zookeeper_0, clientPort=54537, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/cluster_016ada73-ccc6-e170-d67f-7b5f9a0dfd5d/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/cluster_016ada73-ccc6-e170-d67f-7b5f9a0dfd5d/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-08T19:49:59,854 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54537
2024-11-08T19:49:59,868 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-08T19:49:59,873 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-08T19:49:59,992 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-08T19:49:59,993 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
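[Editor's note] The "Starting up minicluster with option: StartMiniClusterOption{...}" entry above reflects the standard HBase test harness. The following is only a hedged sketch of how such a configuration is typically built, assuming the HBase 3.x HBaseTestingUtil/StartMiniClusterOption API; it is not the actual TestHBaseWalOnEC source.

    // Hedged sketch (assumed API, illustrative only): build the mini-cluster option
    // matching the logged values (1 master, 3 region servers, 3 datanodes, 1 ZK server).
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(3)
            .numDataNodes(3)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option); // produces the "Starting up minicluster" log line
        try {
          // test body would run against the mini cluster here
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }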
2024-11-08T19:50:00,059 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1254849551_22 at /127.0.0.1:41176 [Receiving block BP-947710928-172.17.0.2-1731095395196:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:35749:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41176 dst: /127.0.0.1:35749
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-08T19:50:00,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35749 is added to blk_-9223372036854775792_1002 (size=7)
2024-11-08T19:50:00,485 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
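[Editor's note] The RS-3-2-1024k warnings above arise because that policy stripes each block group into 3 data blocks plus 2 parity blocks, so a full write needs 5 datanodes/racks; this mini cluster runs only 3 datanodes, leaving the parity blocks at indices 3 and 4 unplaced, which is what DFSStripedOutputStream is reporting. A minimal sketch of how such a policy is enabled and applied on HDFS follows, using the Hadoop 3.x DistributedFileSystem API; the file system handle and the "/hbase" path are illustrative assumptions, not taken from this log.

    // Hedged sketch: enable and apply the RS-3-2-1024k erasure coding policy.
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class EcPolicySketch {
      public static void apply(DistributedFileSystem fs) throws Exception {
        // RS-3-2-1024k = 3 data + 2 parity cells per stripe; needs 5 datanodes/racks
        // to place every block, hence the parity allocation warnings on a 3-node cluster.
        fs.enableErasureCodingPolicy("RS-3-2-1024k");                  // make the policy available
        fs.setErasureCodingPolicy(new Path("/hbase"), "RS-3-2-1024k"); // apply it to a directory
      }
    }

Whether the cluster topology can actually satisfy the enabled policies can be checked with the command the log itself suggests, 'hdfs ec -verifyClusterSetup'.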
2024-11-08T19:50:00,495 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a with version=8 2024-11-08T19:50:00,495 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/hbase-staging 2024-11-08T19:50:00,619 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-08T19:50:00,913 INFO [Time-limited test {}] client.ConnectionUtils(128): master/692b295ad45c:0 server-side Connection retries=45 2024-11-08T19:50:00,924 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T19:50:00,924 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T19:50:00,930 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T19:50:00,930 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T19:50:00,931 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T19:50:01,094 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-08T19:50:01,153 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-08T19:50:01,163 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-08T19:50:01,167 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T19:50:01,193 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 92906 (auto-detected) 2024-11-08T19:50:01,194 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-08T19:50:01,217 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45119 2024-11-08T19:50:01,245 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45119 connecting to ZooKeeper ensemble=127.0.0.1:54537 2024-11-08T19:50:01,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:451190x0, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T19:50:01,383 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45119-0x1011bff89330000 connected 2024-11-08T19:50:01,481 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T19:50:01,485 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T19:50:01,498 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T19:50:01,502 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a, hbase.cluster.distributed=false 2024-11-08T19:50:01,530 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T19:50:01,536 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45119 2024-11-08T19:50:01,537 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45119 2024-11-08T19:50:01,537 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45119 2024-11-08T19:50:01,538 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45119 2024-11-08T19:50:01,538 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45119 2024-11-08T19:50:01,653 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/692b295ad45c:0 server-side Connection retries=45 2024-11-08T19:50:01,655 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T19:50:01,655 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T19:50:01,656 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T19:50:01,656 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T19:50:01,656 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T19:50:01,659 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-08T19:50:01,663 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T19:50:01,665 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45515 2024-11-08T19:50:01,668 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45515 connecting to ZooKeeper ensemble=127.0.0.1:54537 2024-11-08T19:50:01,670 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T19:50:01,674 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T19:50:01,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:455150x0, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T19:50:01,711 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45515-0x1011bff89330001 connected 2024-11-08T19:50:01,712 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45515-0x1011bff89330001, quorum=127.0.0.1:54537, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T19:50:01,716 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-08T19:50:01,725 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-08T19:50:01,730 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45515-0x1011bff89330001, quorum=127.0.0.1:54537, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-08T19:50:01,739 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45515-0x1011bff89330001, quorum=127.0.0.1:54537, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T19:50:01,740 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45515 2024-11-08T19:50:01,741 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45515 2024-11-08T19:50:01,744 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45515 2024-11-08T19:50:01,746 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45515 2024-11-08T19:50:01,746 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45515 2024-11-08T19:50:01,770 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/692b295ad45c:0 server-side Connection retries=45 2024-11-08T19:50:01,771 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T19:50:01,771 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T19:50:01,772 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T19:50:01,772 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T19:50:01,772 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T19:50:01,772 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-08T19:50:01,773 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T19:50:01,774 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33115 2024-11-08T19:50:01,776 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33115 connecting to ZooKeeper ensemble=127.0.0.1:54537 2024-11-08T19:50:01,778 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T19:50:01,781 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T19:50:01,800 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:331150x0, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T19:50:01,801 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33115-0x1011bff89330002 connected 2024-11-08T19:50:01,802 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33115-0x1011bff89330002, quorum=127.0.0.1:54537, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T19:50:01,802 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-08T19:50:01,805 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-08T19:50:01,806 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33115-0x1011bff89330002, quorum=127.0.0.1:54537, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-08T19:50:01,809 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33115-0x1011bff89330002, quorum=127.0.0.1:54537, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T19:50:01,810 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33115 2024-11-08T19:50:01,812 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33115 2024-11-08T19:50:01,813 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33115 2024-11-08T19:50:01,817 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33115 2024-11-08T19:50:01,817 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33115 2024-11-08T19:50:01,836 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/692b295ad45c:0 server-side Connection retries=45 2024-11-08T19:50:01,837 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T19:50:01,837 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T19:50:01,837 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T19:50:01,837 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T19:50:01,837 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T19:50:01,838 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-08T19:50:01,838 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T19:50:01,839 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43515 2024-11-08T19:50:01,841 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43515 connecting to ZooKeeper ensemble=127.0.0.1:54537 2024-11-08T19:50:01,842 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T19:50:01,845 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T19:50:01,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:435150x0, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T19:50:01,865 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43515-0x1011bff89330003, quorum=127.0.0.1:54537, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T19:50:01,865 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43515-0x1011bff89330003 connected 2024-11-08T19:50:01,866 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-08T19:50:01,867 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-08T19:50:01,868 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43515-0x1011bff89330003, quorum=127.0.0.1:54537, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-08T19:50:01,871 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43515-0x1011bff89330003, quorum=127.0.0.1:54537, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T19:50:01,876 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43515 2024-11-08T19:50:01,877 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43515 2024-11-08T19:50:01,879 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43515 2024-11-08T19:50:01,884 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43515 2024-11-08T19:50:01,884 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43515 2024-11-08T19:50:01,908 DEBUG [M:0;692b295ad45c:45119 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;692b295ad45c:45119 2024-11-08T19:50:01,910 INFO [master/692b295ad45c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/692b295ad45c,45119,1731095400739 2024-11-08T19:50:01,927 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33115-0x1011bff89330002, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T19:50:01,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45515-0x1011bff89330001, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T19:50:01,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43515-0x1011bff89330003, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T19:50:01,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T19:50:01,934 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/692b295ad45c,45119,1731095400739 2024-11-08T19:50:02,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:02,021 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33115-0x1011bff89330002, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-08T19:50:02,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45515-0x1011bff89330001, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-08T19:50:02,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:43515-0x1011bff89330003, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-08T19:50:02,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45515-0x1011bff89330001, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:02,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43515-0x1011bff89330003, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:02,023 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33115-0x1011bff89330002, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:02,024 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-08T19:50:02,026 INFO [master/692b295ad45c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/692b295ad45c,45119,1731095400739 from backup master directory 2024-11-08T19:50:02,108 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33115-0x1011bff89330002, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T19:50:02,108 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/692b295ad45c,45119,1731095400739 2024-11-08T19:50:02,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45515-0x1011bff89330001, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T19:50:02,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T19:50:02,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43515-0x1011bff89330003, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T19:50:02,111 WARN [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
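
The ZKUtil lines above show each server registering a watch on znodes such as /hbase/master and /hbase/acl before those nodes exist, and the later NodeCreated/NodeChildrenChanged/NodeDeleted events being fanned out to every registered ZKWatcher. A minimal sketch of that watch-before-creation pattern with the plain ZooKeeper client (not HBase's ZKUtil); the ensemble address and znode path are copied from the log, everything else is illustrative:

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    // Sketch: set a watch on a znode that may not exist yet, then react to its creation.
    public class MasterZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:54537", 30_000, event -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        });
        connected.await();

        // exists() registers the watch whether or not /hbase/master is present yet;
        // a later NodeCreated or NodeDeleted event on that path fires the callback once.
        zk.exists("/hbase/master", event ->
            System.out.println("event " + event.getType() + " on " + event.getPath()));

        Thread.sleep(60_000); // keep the session alive long enough to observe an event
        zk.close();
      }
    }

Standard ZooKeeper watches are one-shot, which is consistent with the ZKUtil lines that keep re-setting watchers ("Set watcher on existing znode=...") after each event is delivered.
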
2024-11-08T19:50:02,112 INFO [master/692b295ad45c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=692b295ad45c,45119,1731095400739 2024-11-08T19:50:02,114 INFO [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-08T19:50:02,115 INFO [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-08T19:50:02,194 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/hbase.id] with ID: f9d5de49-8343-46a5-9c3d-7ebb82a45b7d 2024-11-08T19:50:02,194 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/.tmp/hbase.id 2024-11-08T19:50:02,209 WARN [master/692b295ad45c:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T19:50:02,209 WARN [master/692b295ad45c:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T19:50:02,218 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1254849551_22 at /127.0.0.1:50134 [Receiving block BP-947710928-172.17.0.2-1731095395196:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:46163:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50134 dst: /127.0.0.1:46163 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
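
The repeated "Cannot allocate parity block(index=3/4, policy=RS-3-2-1024k)" warnings above are what a striped write looks like on this 3-datanode mini-cluster: RS-3-2-1024k splits each block group into 3 data plus 2 parity blocks and needs up to 5 distinct datanodes to place all of them, so the two parity blocks have nowhere to go. A small sketch that inspects which erasure coding policy applies to the test data directory, assuming the hdfs://localhost:34095 namenode and path from the log; the class name and the commented-out unset call are illustrative only:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    // Sketch: report the erasure coding policy in effect for the test data directory.
    public class CheckEcPolicySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:34095"), conf);

        Path dir = new Path("/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a");
        ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
        System.out.println("EC policy on " + dir + ": "
            + (policy == null ? "none (plain replication)" : policy.getName()));

        // To fall back to plain replication for this directory one could call:
        // dfs.unsetErasureCodingPolicy(dir);
      }
    }

From the shell, the command the warning itself suggests, 'hdfs ec -verifyClusterSetup', reports whether the cluster has enough datanodes and racks for the enabled policies.
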
2024-11-08T19:50:02,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46163 is added to blk_-9223372036854775776_1004 (size=42) 2024-11-08T19:50:02,228 WARN [master/692b295ad45c:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-08T19:50:02,229 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/.tmp/hbase.id]:[hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/hbase.id] 2024-11-08T19:50:02,295 INFO [master/692b295ad45c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T19:50:02,300 INFO [master/692b295ad45c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-08T19:50:02,322 INFO [master/692b295ad45c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 20ms. 2024-11-08T19:50:02,342 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33115-0x1011bff89330002, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:02,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:02,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45515-0x1011bff89330001, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:02,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43515-0x1011bff89330003, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:02,360 WARN [master/692b295ad45c:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T19:50:02,360 WARN [master/692b295ad45c:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T19:50:02,368 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1254849551_22 at /127.0.0.1:42262 [Receiving block BP-947710928-172.17.0.2-1731095395196:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:35749:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42262 dst: /127.0.0.1:35749 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T19:50:02,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35749 is added to blk_-9223372036854775760_1006 (size=196) 2024-11-08T19:50:02,383 WARN [master/692b295ad45c:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-08T19:50:02,406 INFO [master/692b295ad45c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-08T19:50:02,408 INFO [master/692b295ad45c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-08T19:50:02,417 INFO [master/692b295ad45c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-08T19:50:02,475 WARN [IPC Server handler 2 on default port 34095 {}] blockmanagement.BlockPlacementPolicyRackFaultTolerant(145): Only able to place 2 
of total expected 3 (maxNodesPerRack=3, numOfReplicas=3) nodes evenly across racks, falling back to evenly place on the remaining racks. This may not guarantee rack-level fault tolerance. Please check if the racks are configured properly. 2024-11-08T19:50:02,475 WARN [IPC Server handler 2 on default port 34095 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-08T19:50:02,476 WARN [IPC Server handler 2 on default port 34095 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-08T19:50:02,476 WARN [IPC Server handler 2 on default port 34095 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-08T19:50:02,481 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] util.FSTableDescriptors(635): Failed write hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/data/master/store/.tabledesc/.tableinfo.0000000001.1189; retrying up to 10 times org.apache.hadoop.ipc.RemoteException: File /user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/data/master/store/.tabledesc/.tableinfo.0000000001.1189 could only be written to 2 of the 3 required nodes for RS-3-2-1024k. There are 3 datanode(s) running and 3 node(s) are excluded in this operation. 
at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:2480) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:293) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:3075) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:932) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:603) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.addBlock(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$addBlock$11(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSOutputStream.addBlock(DFSOutputStream.java:1143) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSStripedOutputStream.allocateNewBlock(DFSStripedOutputStream.java:508) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSStripedOutputStream.writeChunk(DFSStripedOutputStream.java:561) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunks(FSOutputSummer.java:220) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:165) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:146) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSStripedOutputStream.closeImpl(DFSStripedOutputStream.java:1234) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:861) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:77) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:106) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hbase.util.FSTableDescriptors.writeTableDescriptor(FSTableDescriptors.java:631) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FSTableDescriptors.createTableDescriptorForTableDirectory(FSTableDescriptors.java:707) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.master.region.MasterRegion.bootstrap(MasterRegion.java:241) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:410) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T19:50:02,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44041 is added to blk_-9223372036854775788_1002 (size=7) 2024-11-08T19:50:02,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46163 is added to blk_-9223372036854775789_1002 (size=7) 2024-11-08T19:50:02,489 WARN [IPC Server handler 0 on default port 34095 {}] blockmanagement.BlockPlacementPolicyRackFaultTolerant(145): Only able to place 2 of total expected 3 (maxNodesPerRack=3, numOfReplicas=3) nodes evenly across racks, falling back to evenly place on the remaining racks. This may not guarantee rack-level fault tolerance. Please check if the racks are configured properly. 
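
The cluster ID handling earlier in this sequence follows a write-to-temporary-then-rename pattern ("Write the cluster ID file to a temporary location", then "Move the temporary cluster ID file to its target location"). A minimal sketch of that idiom against the Hadoop FileSystem API; the class and method names are invented for illustration and are not HBase's FSUtils:

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Sketch: write content to a temporary file, then publish it with a single rename.
    public class PublishFileAtomicallySketch {
      static void publish(FileSystem fs, Path target, byte[] content) throws IOException {
        Path tmp = new Path(target.getParent(), ".tmp/" + target.getName());
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(content);
        }
        // rename() moves the file to its final path in one namenode operation,
        // so readers never see a half-written target.
        if (!fs.rename(tmp, target)) {
          throw new IOException("rename " + tmp + " -> " + target + " failed");
        }
      }

      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        publish(fs, new Path("/tmp/hbase.id.example"),
            "f9d5de49-8343-46a5-9c3d-7ebb82a45b7d".getBytes(StandardCharsets.UTF_8));
      }
    }
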
2024-11-08T19:50:02,489 WARN [IPC Server handler 0 on default port 34095 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-08T19:50:02,489 WARN [IPC Server handler 0 on default port 34095 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-08T19:50:02,489 WARN [IPC Server handler 0 on default port 34095 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-08T19:50:02,490 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] util.FSTableDescriptors(635): Failed write hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/data/master/store/.tabledesc/.tableinfo.0000000002.1189; retrying up to 10 times org.apache.hadoop.ipc.RemoteException: File /user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/data/master/store/.tabledesc/.tableinfo.0000000002.1189 could only be written to 2 of the 3 required nodes for RS-3-2-1024k. There are 3 datanode(s) running and 3 node(s) are excluded in this operation. 
at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:2480) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:293) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:3075) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:932) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:603) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.addBlock(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$addBlock$11(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSOutputStream.addBlock(DFSOutputStream.java:1143) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSStripedOutputStream.allocateNewBlock(DFSStripedOutputStream.java:508) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSStripedOutputStream.writeChunk(DFSStripedOutputStream.java:561) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunks(FSOutputSummer.java:220) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:165) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:146) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSStripedOutputStream.closeImpl(DFSStripedOutputStream.java:1234) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:861) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:77) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:106) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.util.FSTableDescriptors.writeTableDescriptor(FSTableDescriptors.java:631) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FSTableDescriptors.createTableDescriptorForTableDirectory(FSTableDescriptors.java:707) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.master.region.MasterRegion.bootstrap(MasterRegion.java:241) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:410) ~[classes/:?] 
at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T19:50:02,497 WARN [master/692b295ad45c:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T19:50:02,497 WARN [master/692b295ad45c:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T19:50:02,501 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1254849551_22 at /127.0.0.1:50172 [Receiving block BP-947710928-172.17.0.2-1731095395196:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:46163:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50172 dst: /127.0.0.1:46163 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T19:50:02,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46163 is added to blk_-9223372036854775744_1008 (size=1189) 2024-11-08T19:50:02,507 WARN [master/692b295ad45c:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
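
The FSTableDescriptors DEBUG lines above show each failed attempt being retried with the next sequence id (.tableinfo.0000000001, then .tableinfo.0000000002) and the message "retrying up to 10 times"; the partially written files are cleaned up a little further down. A generic bounded-retry sketch of that shape; the Attempt interface and the file-name format in the comments are illustrative placeholders, not HBase's actual API:

    import java.io.IOException;

    // Sketch: retry a write a bounded number of times, bumping a sequence id each attempt.
    public class BoundedRetryWriteSketch {
      interface Attempt {
        void run(int sequenceId) throws IOException;
      }

      static void writeWithRetries(int maxAttempts, Attempt attempt) throws IOException {
        IOException last = null;
        for (int seq = 1; seq <= maxAttempts; seq++) {
          try {
            attempt.run(seq);   // e.g. write a .tableinfo file named after this sequence id
            return;             // success: stop retrying
          } catch (IOException e) {
            last = e;           // remember the failure and move on to the next sequence id
          }
        }
        throw last;             // every attempt failed
      }

      public static void main(String[] args) throws IOException {
        writeWithRetries(10, seq ->
            System.out.printf("attempt %d would write .tableinfo.%010d%n", seq, seq));
      }
    }
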
2024-11-08T19:50:02,511 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] util.FSTableDescriptors(591): Deleted hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/data/master/store/.tabledesc/.tableinfo.0000000001.1189 2024-11-08T19:50:02,514 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] util.FSTableDescriptors(591): Deleted hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/data/master/store/.tabledesc/.tableinfo.0000000002.1189 2024-11-08T19:50:02,532 INFO [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/data/master/store 2024-11-08T19:50:02,553 WARN [master/692b295ad45c:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T19:50:02,554 WARN [master/692b295ad45c:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T19:50:02,557 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1254849551_22 at /127.0.0.1:42306 [Receiving block BP-947710928-172.17.0.2-1731095395196:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35749:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42306 dst: /127.0.0.1:35749 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T19:50:02,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35749 is added to blk_-9223372036854775728_1010 (size=34) 2024-11-08T19:50:02,571 WARN [master/692b295ad45c:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-08T19:50:02,575 INFO [master/692b295ad45c:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-08T19:50:02,579 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T19:50:02,581 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-08T19:50:02,581 INFO [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T19:50:02,581 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T19:50:02,583 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-08T19:50:02,583 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T19:50:02,583 INFO [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
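
The "Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682 ...}" line above prints the full descriptor of the local 'master:store' table: column families info, proc, rs and state with the versions, block sizes, encodings and bloom filters shown. A sketch of how a descriptor with those values can be assembled through HBase's public builder API; the values are copied from the log line, and this only illustrates the client-side builders, not the code path MasterRegion itself uses:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: build a table descriptor shaped like the 'master:store' one in the log.
    public class MasterStoreDescriptorSketch {
      public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                    // VERSIONS => '3'
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setBlocksize(8192)                                   // BLOCKSIZE => 8 KB
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1).setBloomFilterType(BloomType.ROW).build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("rs"))
                .setMaxVersions(1).setBloomFilterType(BloomType.ROW).build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("state"))
                .setMaxVersions(1).setBloomFilterType(BloomType.ROW).build())
            .build();
      }

      public static void main(String[] args) {
        System.out.println(build());
      }
    }

The StoreHotnessProtector INFO line above is driven by configuration rather than the descriptor: per its own message, setting hbase.region.store.parallel.put.limit to a value greater than 0 enables it.
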
2024-11-08T19:50:02,585 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731095402580Disabling compacts and flushes for region at 1731095402581 (+1 ms)Disabling writes for close at 1731095402583 (+2 ms)Writing region close event to WAL at 1731095402583Closed at 1731095402583 2024-11-08T19:50:02,594 WARN [master/692b295ad45c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/data/master/store/.initializing 2024-11-08T19:50:02,594 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/WALs/692b295ad45c,45119,1731095400739 2024-11-08T19:50:02,606 INFO [master/692b295ad45c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-08T19:50:02,632 INFO [master/692b295ad45c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=692b295ad45c%2C45119%2C1731095400739, suffix=, logDir=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/WALs/692b295ad45c,45119,1731095400739, archiveDir=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/oldWALs, maxLogs=10 2024-11-08T19:50:02,671 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/WALs/692b295ad45c,45119,1731095400739/692b295ad45c%2C45119%2C1731095400739.1731095402638, exclude list is [], retry=0 2024-11-08T19:50:02,693 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:414) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:473) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:468) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T19:50:02,695 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35749,DS-91ae1e86-391f-42af-8bfa-692249e3172a,DISK] 2024-11-08T19:50:02,696 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44041,DS-0c84927a-4568-4e73-89cf-73067e93d1e5,DISK] 2024-11-08T19:50:02,696 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46163,DS-f2d7659d-395a-4749-9f43-e1bfba72294e,DISK] 2024-11-08T19:50:02,699 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-08T19:50:02,749 INFO [master/692b295ad45c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/WALs/692b295ad45c,45119,1731095400739/692b295ad45c%2C45119%2C1731095400739.1731095402638 2024-11-08T19:50:02,753 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:32819:32819),(127.0.0.1/127.0.0.1:46831:46831),(127.0.0.1/127.0.0.1:43939:43939)] 2024-11-08T19:50:02,753 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-08T19:50:02,754 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T19:50:02,758 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T19:50:02,759 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T19:50:02,814 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T19:50:02,852 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-08T19:50:02,858 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T19:50:02,864 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T19:50:02,865 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T19:50:02,872 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-08T19:50:02,872 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T19:50:02,874 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T19:50:02,875 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T19:50:02,880 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-08T19:50:02,880 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T19:50:02,885 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T19:50:02,886 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T19:50:02,890 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-08T19:50:02,890 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T19:50:02,892 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T19:50:02,896 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T19:50:02,902 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-08T19:50:02,904 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-08T19:50:02,913 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T19:50:02,914 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T19:50:02,921 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-08T19:50:02,927 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T19:50:02,947 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T19:50:02,949 INFO [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61226028, jitterRate=-0.08766108751296997}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-08T19:50:02,957 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731095402776Initializing all the Stores at 1731095402779 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731095402780 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731095402781 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731095402781Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731095402782 (+1 ms)Cleaning up temporary data from old regions at 1731095402914 (+132 ms)Region opened successfully at 1731095402957 (+43 ms) 2024-11-08T19:50:02,958 INFO [master/692b295ad45c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-08T19:50:03,009 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66a0f2de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=692b295ad45c/172.17.0.2:0 2024-11-08T19:50:03,056 INFO [master/692b295ad45c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-08T19:50:03,072 INFO [master/692b295ad45c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-08T19:50:03,072 INFO [master/692b295ad45c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-08T19:50:03,077 INFO [master/692b295ad45c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-08T19:50:03,080 INFO [master/692b295ad45c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 2 msec 2024-11-08T19:50:03,089 INFO [master/692b295ad45c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 8 msec 2024-11-08T19:50:03,089 INFO [master/692b295ad45c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-08T19:50:03,180 INFO [master/692b295ad45c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-08T19:50:03,201 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-08T19:50:03,216 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-08T19:50:03,221 INFO [master/692b295ad45c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-08T19:50:03,223 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-08T19:50:03,236 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-08T19:50:03,241 INFO [master/692b295ad45c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-08T19:50:03,246 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-08T19:50:03,258 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-08T19:50:03,266 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-08T19:50:03,278 DEBUG 
[master/692b295ad45c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-08T19:50:03,309 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-08T19:50:03,320 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-08T19:50:03,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45515-0x1011bff89330001, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T19:50:03,337 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33115-0x1011bff89330002, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T19:50:03,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43515-0x1011bff89330003, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T19:50:03,338 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33115-0x1011bff89330002, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:03,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43515-0x1011bff89330003, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:03,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45515-0x1011bff89330001, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:03,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T19:50:03,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:03,346 INFO [master/692b295ad45c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=692b295ad45c,45119,1731095400739, sessionid=0x1011bff89330000, setting cluster-up flag (Was=false) 2024-11-08T19:50:03,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45515-0x1011bff89330001, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:03,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43515-0x1011bff89330003, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:03,395 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33115-0x1011bff89330002, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:03,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:03,436 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-08T19:50:03,440 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=692b295ad45c,45119,1731095400739 2024-11-08T19:50:03,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45515-0x1011bff89330001, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:03,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:03,468 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33115-0x1011bff89330002, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:03,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43515-0x1011bff89330003, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:03,499 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-08T19:50:03,502 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=692b295ad45c,45119,1731095400739 2024-11-08T19:50:03,511 INFO [master/692b295ad45c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-08T19:50:03,604 INFO [RS:1;692b295ad45c:33115 {}] regionserver.HRegionServer(746): ClusterId : f9d5de49-8343-46a5-9c3d-7ebb82a45b7d 2024-11-08T19:50:03,607 INFO [RS:0;692b295ad45c:45515 {}] regionserver.HRegionServer(746): ClusterId : f9d5de49-8343-46a5-9c3d-7ebb82a45b7d 2024-11-08T19:50:03,608 INFO [RS:2;692b295ad45c:43515 {}] regionserver.HRegionServer(746): ClusterId : f9d5de49-8343-46a5-9c3d-7ebb82a45b7d 2024-11-08T19:50:03,608 DEBUG [RS:1;692b295ad45c:33115 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-08T19:50:03,608 DEBUG [RS:2;692b295ad45c:43515 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-08T19:50:03,608 DEBUG [RS:0;692b295ad45c:45515 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-08T19:50:03,615 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 
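Editor's note: the ZKUtil DEBUG lines above report that znodes such as /hbase/balancer and /hbase/normalizer are absent and stress that this is "not necessarily an error", because a missing switch znode just means the default is in effect. A minimal sketch of that kind of existence probe with the plain ZooKeeper client, not HBase's ZKUtil itself; the quorum address is taken from the log and the fallback message is illustrative.

```java
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ZNodeProbe {
    public static void main(String[] args) throws Exception {
        // Connect to the test quorum seen in the log; the no-op lambda is the session watcher.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:54537", 30_000, event -> { });
        try {
            Stat stat = zk.exists("/hbase/balancer", false); // probe without setting a watch
            if (stat == null) {
                // Absent znode: treat as "use the default balancer switch", not as a failure.
                System.out.println("znode absent - falling back to default");
            } else {
                byte[] data = zk.getData("/hbase/balancer", false, stat);
                System.out.println("znode present, " + data.length + " bytes of data");
            }
        } finally {
            zk.close();
        }
    }
}
```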
2024-11-08T19:50:03,629 INFO [master/692b295ad45c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-08T19:50:03,637 DEBUG [RS:0;692b295ad45c:45515 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-08T19:50:03,638 DEBUG [RS:0;692b295ad45c:45515 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-08T19:50:03,638 DEBUG [RS:1;692b295ad45c:33115 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-08T19:50:03,638 DEBUG [RS:1;692b295ad45c:33115 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-08T19:50:03,640 DEBUG [RS:2;692b295ad45c:43515 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-08T19:50:03,640 DEBUG [RS:2;692b295ad45c:43515 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-08T19:50:03,641 INFO [master/692b295ad45c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-08T19:50:03,648 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 692b295ad45c,45119,1731095400739 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-08T19:50:03,679 DEBUG [RS:1;692b295ad45c:33115 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-08T19:50:03,680 DEBUG [RS:1;692b295ad45c:33115 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58a19704, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=692b295ad45c/172.17.0.2:0 2024-11-08T19:50:03,680 DEBUG [RS:0;692b295ad45c:45515 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-08T19:50:03,681 DEBUG [RS:0;692b295ad45c:45515 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3dd227a9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=692b295ad45c/172.17.0.2:0 2024-11-08T19:50:03,681 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/692b295ad45c:0, corePoolSize=5, maxPoolSize=5 2024-11-08T19:50:03,681 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/692b295ad45c:0, corePoolSize=5, maxPoolSize=5 2024-11-08T19:50:03,681 DEBUG 
[master/692b295ad45c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/692b295ad45c:0, corePoolSize=5, maxPoolSize=5 2024-11-08T19:50:03,682 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/692b295ad45c:0, corePoolSize=5, maxPoolSize=5 2024-11-08T19:50:03,682 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/692b295ad45c:0, corePoolSize=10, maxPoolSize=10 2024-11-08T19:50:03,682 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:03,682 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/692b295ad45c:0, corePoolSize=2, maxPoolSize=2 2024-11-08T19:50:03,682 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:03,690 DEBUG [RS:2;692b295ad45c:43515 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-08T19:50:03,691 DEBUG [RS:2;692b295ad45c:43515 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3214a898, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=692b295ad45c/172.17.0.2:0 2024-11-08T19:50:03,700 DEBUG [RS:1;692b295ad45c:33115 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;692b295ad45c:33115 2024-11-08T19:50:03,703 DEBUG [RS:0;692b295ad45c:45515 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;692b295ad45c:45515 2024-11-08T19:50:03,703 INFO [RS:1;692b295ad45c:33115 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-08T19:50:03,703 INFO [RS:0;692b295ad45c:45515 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-08T19:50:03,703 INFO [RS:1;692b295ad45c:33115 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-08T19:50:03,703 INFO [RS:0;692b295ad45c:45515 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-08T19:50:03,703 DEBUG [RS:1;692b295ad45c:33115 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-08T19:50:03,703 DEBUG [RS:0;692b295ad45c:45515 {}] regionserver.HRegionServer(832): About to register with Master. 
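Editor's note: the executor.ExecutorService lines above start each master pool with equal core and max sizes (for example MASTER_OPEN_REGION with corePoolSize=5, maxPoolSize=5), and the earlier RemoteProcedureDispatcher line reports coreThreads=3 with allowCoreThreadTimeOut=true. A rough JDK-level analogue of that pool shape, purely illustrative and not HBase's ExecutorService class:

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class FixedSizePoolSketch {
    public static void main(String[] args) {
        // Fixed-size pool (core == max), mirroring "corePoolSize=5, maxPoolSize=5";
        // idle core threads may time out, as the allowCoreThreadTimeOut=true line suggests.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                5, 5, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        pool.allowCoreThreadTimeOut(true);

        pool.execute(() -> System.out.println("open-region task placeholder"));
        pool.shutdown();
    }
}
```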
2024-11-08T19:50:03,706 INFO [RS:1;692b295ad45c:33115 {}] regionserver.HRegionServer(2659): reportForDuty to master=692b295ad45c,45119,1731095400739 with port=33115, startcode=1731095401770 2024-11-08T19:50:03,720 INFO [RS:0;692b295ad45c:45515 {}] regionserver.HRegionServer(2659): reportForDuty to master=692b295ad45c,45119,1731095400739 with port=45515, startcode=1731095401615 2024-11-08T19:50:03,722 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T19:50:03,723 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-08T19:50:03,723 DEBUG [RS:2;692b295ad45c:43515 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;692b295ad45c:43515 2024-11-08T19:50:03,723 INFO [RS:2;692b295ad45c:43515 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-08T19:50:03,723 INFO [RS:2;692b295ad45c:43515 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-08T19:50:03,724 DEBUG [RS:2;692b295ad45c:43515 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-08T19:50:03,725 DEBUG [RS:1;692b295ad45c:33115 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-08T19:50:03,725 DEBUG [RS:0;692b295ad45c:45515 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-08T19:50:03,731 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T19:50:03,732 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-08T19:50:03,737 INFO [RS:2;692b295ad45c:43515 {}] regionserver.HRegionServer(2659): reportForDuty to master=692b295ad45c,45119,1731095400739 
with port=43515, startcode=1731095401835 2024-11-08T19:50:03,742 DEBUG [RS:2;692b295ad45c:43515 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-08T19:50:03,743 INFO [master/692b295ad45c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731095433743 2024-11-08T19:50:03,745 INFO [master/692b295ad45c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-08T19:50:03,747 INFO [master/692b295ad45c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-08T19:50:03,751 INFO [master/692b295ad45c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-08T19:50:03,751 INFO [master/692b295ad45c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-08T19:50:03,752 INFO [master/692b295ad45c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-08T19:50:03,752 INFO [master/692b295ad45c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-08T19:50:03,762 INFO [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:03,769 INFO [master/692b295ad45c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-08T19:50:03,770 INFO [master/692b295ad45c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-08T19:50:03,770 INFO [master/692b295ad45c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-08T19:50:03,776 INFO [master/692b295ad45c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-08T19:50:03,778 INFO [master/692b295ad45c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-08T19:50:03,792 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/692b295ad45c:0:becomeActiveMaster-HFileCleaner.large.0-1731095403781,5,FailOnTimeoutGroup] 2024-11-08T19:50:03,797 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51707, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-08T19:50:03,800 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55597, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-08T19:50:03,800 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48067, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-08T19:50:03,800 DEBUG 
[master/692b295ad45c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/692b295ad45c:0:becomeActiveMaster-HFileCleaner.small.0-1731095403796,5,FailOnTimeoutGroup] 2024-11-08T19:50:03,800 INFO [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:03,800 INFO [master/692b295ad45c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-08T19:50:03,802 INFO [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:03,802 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T19:50:03,802 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T19:50:03,802 INFO [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:03,806 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45119 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 692b295ad45c,45515,1731095401615 2024-11-08T19:50:03,809 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45119 {}] master.ServerManager(517): Registering regionserver=692b295ad45c,45515,1731095401615 2024-11-08T19:50:03,837 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45119 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 692b295ad45c,43515,1731095401835 2024-11-08T19:50:03,837 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45119 {}] master.ServerManager(517): Registering regionserver=692b295ad45c,43515,1731095401835 2024-11-08T19:50:03,842 DEBUG [RS:0;692b295ad45c:45515 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a 2024-11-08T19:50:03,843 DEBUG [RS:0;692b295ad45c:45515 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34095 2024-11-08T19:50:03,843 DEBUG [RS:0;692b295ad45c:45515 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-08T19:50:03,857 DEBUG [RS:2;692b295ad45c:43515 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a 2024-11-08T19:50:03,857 DEBUG [RS:2;692b295ad45c:43515 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34095 2024-11-08T19:50:03,857 DEBUG [RS:2;692b295ad45c:43515 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-08T19:50:03,857 INFO 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45119 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 692b295ad45c,33115,1731095401770 2024-11-08T19:50:03,857 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45119 {}] master.ServerManager(517): Registering regionserver=692b295ad45c,33115,1731095401770 2024-11-08T19:50:03,867 DEBUG [RS:1;692b295ad45c:33115 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a 2024-11-08T19:50:03,867 DEBUG [RS:1;692b295ad45c:33115 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34095 2024-11-08T19:50:03,868 DEBUG [RS:1;692b295ad45c:33115 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-08T19:50:03,873 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1254849551_22 at /127.0.0.1:41588 [Receiving block BP-947710928-172.17.0.2-1731095395196:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:44041:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41588 dst: /127.0.0.1:44041 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T19:50:03,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44041 is added to blk_-9223372036854775712_1013 (size=1321) 2024-11-08T19:50:03,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T19:50:03,913 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
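Editor's note: several earlier messages in this run point at tunables left at their defaults: FlushLargeStoresPolicy falls back because hbase.hregion.percolumnfamilyflush.size.lower.bound is unset, SimpleRegionNormalizer logs hbase.normalizer.merge.min_region_size.mb, and HMaster notes that hbase.regions.recovery.store.file.ref.count must be > 0 to enable reopening regions with very high store file ref counts. A minimal sketch of overriding them programmatically, using only keys quoted in the log; the values chosen here are arbitrary examples, not recommendations.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TuningSketch {
    public static Configuration tunedConf() {
        Configuration conf = HBaseConfiguration.create();
        // Per-family flush lower bound for FlushLargeStoresPolicy, in bytes (16 MB is arbitrary).
        conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);
        // Minimum region size (MB) below which the normalizer considers merging.
        conf.setInt("hbase.normalizer.merge.min_region_size.mb", 1);
        // A value > 0 enables reopening regions with very high store file ref counts.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);
        return conf;
    }
}
```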
2024-11-08T19:50:03,915 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-08T19:50:03,916 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a 2024-11-08T19:50:03,929 DEBUG [RS:1;692b295ad45c:33115 {}] zookeeper.ZKUtil(111): regionserver:33115-0x1011bff89330002, quorum=127.0.0.1:54537, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/692b295ad45c,33115,1731095401770 2024-11-08T19:50:03,929 DEBUG [RS:0;692b295ad45c:45515 {}] zookeeper.ZKUtil(111): regionserver:45515-0x1011bff89330001, quorum=127.0.0.1:54537, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/692b295ad45c,45515,1731095401615 2024-11-08T19:50:03,929 WARN [RS:1;692b295ad45c:33115 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-08T19:50:03,929 WARN [RS:0;692b295ad45c:45515 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-08T19:50:03,929 DEBUG [RS:2;692b295ad45c:43515 {}] zookeeper.ZKUtil(111): regionserver:43515-0x1011bff89330003, quorum=127.0.0.1:54537, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/692b295ad45c,43515,1731095401835 2024-11-08T19:50:03,929 INFO [RS:1;692b295ad45c:33115 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-08T19:50:03,929 INFO [RS:0;692b295ad45c:45515 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-08T19:50:03,929 WARN [RS:2;692b295ad45c:43515 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-08T19:50:03,929 INFO [RS:2;692b295ad45c:43515 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-08T19:50:03,929 DEBUG [RS:0;692b295ad45c:45515 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/WALs/692b295ad45c,45515,1731095401615 2024-11-08T19:50:03,929 DEBUG [RS:1;692b295ad45c:33115 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/WALs/692b295ad45c,33115,1731095401770 2024-11-08T19:50:03,930 DEBUG [RS:2;692b295ad45c:43515 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/WALs/692b295ad45c,43515,1731095401835 2024-11-08T19:50:03,930 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [692b295ad45c,33115,1731095401770] 2024-11-08T19:50:03,931 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [692b295ad45c,43515,1731095401835] 2024-11-08T19:50:03,931 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [692b295ad45c,45515,1731095401615] 2024-11-08T19:50:03,968 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T19:50:03,968 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
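Editor's note: the repeated DFSStripedOutputStream warnings above (parity block indexes 3 and 4 of policy RS-3-2-1024k cannot be allocated) are what a 3-datanode mini cluster would be expected to produce, since RS-3-2 needs five distinct nodes to place a full block group, and the log itself suggests running 'hdfs ec -verifyClusterSetup'. A hedged sketch of checking which erasure coding policy applies to the test data directory; the path is illustrative and the assumption is that the filesystem is HDFS with the erasure-coding query API available.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcPolicyCheck {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode address taken from the log; the directory below is an illustrative example.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:34095"), conf);
        if (fs instanceof DistributedFileSystem) {
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            ErasureCodingPolicy policy =
                dfs.getErasureCodingPolicy(new Path("/user/jenkins/test-data"));
            // null means plain replication; RS-3-2-1024k needs at least 5 datanodes for parity placement.
            System.out.println(policy == null ? "replicated" : policy.getName());
        }
        fs.close();
    }
}
```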
2024-11-08T19:50:03,985 INFO [RS:0;692b295ad45c:45515 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-08T19:50:04,009 INFO [RS:2;692b295ad45c:43515 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-08T19:50:04,009 INFO [RS:1;692b295ad45c:33115 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-08T19:50:04,029 INFO [RS:1;692b295ad45c:33115 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-08T19:50:04,036 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1254849551_22 at /127.0.0.1:50192 [Receiving block BP-947710928-172.17.0.2-1731095395196:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:46163:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50192 dst: /127.0.0.1:46163 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T19:50:04,039 INFO [RS:1;692b295ad45c:33115 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-08T19:50:04,039 INFO [RS:1;692b295ad45c:33115 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:04,044 INFO [RS:2;692b295ad45c:43515 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-08T19:50:04,048 INFO [RS:2;692b295ad45c:43515 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-08T19:50:04,049 INFO [RS:2;692b295ad45c:43515 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-08T19:50:04,056 INFO [RS:2;692b295ad45c:43515 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-08T19:50:04,064 INFO [RS:1;692b295ad45c:33115 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-08T19:50:04,071 INFO [RS:2;692b295ad45c:43515 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-08T19:50:04,072 INFO [RS:1;692b295ad45c:33115 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-08T19:50:04,073 INFO [RS:2;692b295ad45c:43515 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:04,073 DEBUG [RS:2;692b295ad45c:43515 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,074 DEBUG [RS:2;692b295ad45c:43515 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,074 DEBUG [RS:2;692b295ad45c:43515 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,074 DEBUG [RS:2;692b295ad45c:43515 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,074 DEBUG [RS:2;692b295ad45c:43515 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,075 DEBUG [RS:2;692b295ad45c:43515 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/692b295ad45c:0, corePoolSize=2, maxPoolSize=2 2024-11-08T19:50:04,075 INFO [RS:1;692b295ad45c:33115 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-08T19:50:04,075 DEBUG [RS:2;692b295ad45c:43515 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,075 DEBUG [RS:1;692b295ad45c:33115 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,075 DEBUG [RS:1;692b295ad45c:33115 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,075 DEBUG [RS:1;692b295ad45c:33115 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,075 DEBUG [RS:1;692b295ad45c:33115 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,076 DEBUG [RS:1;692b295ad45c:33115 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,076 DEBUG [RS:1;692b295ad45c:33115 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/692b295ad45c:0, corePoolSize=2, maxPoolSize=2 2024-11-08T19:50:04,076 DEBUG [RS:1;692b295ad45c:33115 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,076 DEBUG [RS:1;692b295ad45c:33115 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,076 DEBUG [RS:1;692b295ad45c:33115 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,077 DEBUG [RS:1;692b295ad45c:33115 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,077 DEBUG [RS:1;692b295ad45c:33115 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,077 DEBUG [RS:1;692b295ad45c:33115 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,077 DEBUG [RS:1;692b295ad45c:33115 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/692b295ad45c:0, corePoolSize=3, maxPoolSize=3 2024-11-08T19:50:04,077 DEBUG [RS:1;692b295ad45c:33115 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0, corePoolSize=3, maxPoolSize=3 2024-11-08T19:50:04,075 DEBUG [RS:2;692b295ad45c:43515 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,078 DEBUG [RS:2;692b295ad45c:43515 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,078 DEBUG [RS:2;692b295ad45c:43515 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,078 DEBUG [RS:2;692b295ad45c:43515 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,078 DEBUG [RS:2;692b295ad45c:43515 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,078 DEBUG [RS:2;692b295ad45c:43515 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/692b295ad45c:0, corePoolSize=3, maxPoolSize=3 2024-11-08T19:50:04,078 DEBUG [RS:2;692b295ad45c:43515 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0, corePoolSize=3, maxPoolSize=3 2024-11-08T19:50:04,084 INFO [RS:0;692b295ad45c:45515 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-08T19:50:04,090 INFO [RS:1;692b295ad45c:33115 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:04,090 INFO [RS:1;692b295ad45c:33115 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:04,091 INFO [RS:1;692b295ad45c:33115 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:04,091 INFO [RS:1;692b295ad45c:33115 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:04,091 INFO [RS:1;692b295ad45c:33115 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:04,091 INFO [RS:1;692b295ad45c:33115 {}] hbase.ChoreService(168): Chore ScheduledChore name=692b295ad45c,33115,1731095401770-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T19:50:04,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46163 is added to blk_-9223372036854775696_1015 (size=32) 2024-11-08T19:50:04,101 INFO [RS:0;692b295ad45c:45515 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-08T19:50:04,101 INFO [RS:0;692b295ad45c:45515 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:04,104 INFO [RS:0;692b295ad45c:45515 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-08T19:50:04,108 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-08T19:50:04,110 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T19:50:04,112 INFO [RS:0;692b295ad45c:45515 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-08T19:50:04,113 INFO [RS:0;692b295ad45c:45515 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:04,113 DEBUG [RS:0;692b295ad45c:45515 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,113 DEBUG [RS:0;692b295ad45c:45515 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,114 DEBUG [RS:0;692b295ad45c:45515 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,114 DEBUG [RS:0;692b295ad45c:45515 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,114 DEBUG [RS:0;692b295ad45c:45515 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,114 DEBUG [RS:0;692b295ad45c:45515 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/692b295ad45c:0, corePoolSize=2, maxPoolSize=2 2024-11-08T19:50:04,114 DEBUG [RS:0;692b295ad45c:45515 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,114 DEBUG [RS:0;692b295ad45c:45515 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,115 DEBUG [RS:0;692b295ad45c:45515 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,115 DEBUG [RS:0;692b295ad45c:45515 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,115 DEBUG [RS:0;692b295ad45c:45515 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,115 DEBUG [RS:0;692b295ad45c:45515 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:04,115 DEBUG [RS:0;692b295ad45c:45515 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/692b295ad45c:0, corePoolSize=3, maxPoolSize=3 2024-11-08T19:50:04,115 DEBUG [RS:0;692b295ad45c:45515 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0, corePoolSize=3, maxPoolSize=3 2024-11-08T19:50:04,134 INFO [RS:2;692b295ad45c:43515 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
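Editor's note: the entries in this capture run together on long lines, which makes them awkward to grep. Below is a minimal sketch for splitting them back out and pulling the fields apart; the regex is an assumption inferred from the timestamp/level/thread/class(line) layout visible above, not an official format specification.

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Splits concatenated entries like the ones above and extracts their fields.
public class LogEntryParser {
    // Assumed layout: ISO timestamp, level, [thread {...}], class(line): message
    private static final Pattern ENTRY = Pattern.compile(
        "(\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3})\\s+(\\w+)\\s+\\[(.+?)\\]\\s+"
        + "([\\w.$]+)\\((\\d+)\\):\\s+(.*?)(?=\\s+\\d{4}-\\d{2}-\\d{2}T|$)",
        Pattern.DOTALL);

    public static void main(String[] args) {
        String sample = "2024-11-08T19:50:04,084 INFO [RS:0;692b295ad45c:45515 {}] "
            + "regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, "
            + "globalMemStoreLimitLowMark=836 M, Offheap=false";
        Matcher m = ENTRY.matcher(sample);
        while (m.find()) {
            System.out.println("time=" + m.group(1) + " level=" + m.group(2)
                + " thread=" + m.group(3) + " class=" + m.group(4)
                + " line=" + m.group(5) + " msg=" + m.group(6));
        }
    }
}
```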
2024-11-08T19:50:04,135 INFO [RS:2;692b295ad45c:43515 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:04,135 INFO [RS:2;692b295ad45c:43515 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:04,135 INFO [RS:2;692b295ad45c:43515 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:04,135 INFO [RS:2;692b295ad45c:43515 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:04,135 INFO [RS:2;692b295ad45c:43515 {}] hbase.ChoreService(168): Chore ScheduledChore name=692b295ad45c,43515,1731095401835-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T19:50:04,160 INFO [RS:0;692b295ad45c:45515 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:04,160 INFO [RS:0;692b295ad45c:45515 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:04,161 INFO [RS:0;692b295ad45c:45515 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:04,161 INFO [RS:0;692b295ad45c:45515 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:04,161 INFO [RS:0;692b295ad45c:45515 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:04,161 INFO [RS:0;692b295ad45c:45515 {}] hbase.ChoreService(168): Chore ScheduledChore name=692b295ad45c,45515,1731095401615-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T19:50:04,178 INFO [RS:1;692b295ad45c:33115 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-08T19:50:04,181 INFO [RS:1;692b295ad45c:33115 {}] hbase.ChoreService(168): Chore ScheduledChore name=692b295ad45c,33115,1731095401770-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:04,182 INFO [RS:2;692b295ad45c:43515 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-08T19:50:04,182 INFO [RS:1;692b295ad45c:33115 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:04,182 INFO [RS:2;692b295ad45c:43515 {}] hbase.ChoreService(168): Chore ScheduledChore name=692b295ad45c,43515,1731095401835-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:04,182 INFO [RS:1;692b295ad45c:33115 {}] regionserver.Replication(171): 692b295ad45c,33115,1731095401770 started 2024-11-08T19:50:04,188 INFO [RS:2;692b295ad45c:43515 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
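Editor's note: each ScheduledChore above is a fixed-period background task (CompactionChecker and MemstoreFlusherChore every second, ExecutorStatusChore every minute, and so on). The sketch below mimics that scheduling pattern with a plain ScheduledExecutorService; it is an analogy for readers, not HBase's ChoreService implementation.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Rough analogy to the chores listed above: named tasks run at a fixed period.
public class ChoreAnalogy {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService chorePool = Executors.newScheduledThreadPool(1);

        // Periods mirror the log: CompactionChecker every 1000 ms,
        // ExecutorStatusChore every 60000 ms.
        chorePool.scheduleAtFixedRate(
            () -> System.out.println("CompactionChecker tick"), 0, 1000, TimeUnit.MILLISECONDS);
        chorePool.scheduleAtFixedRate(
            () -> System.out.println("ExecutorStatusChore tick"), 0, 60000, TimeUnit.MILLISECONDS);

        TimeUnit.SECONDS.sleep(3);   // let a few ticks run, then shut down
        chorePool.shutdownNow();
    }
}
```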
2024-11-08T19:50:04,188 INFO [RS:2;692b295ad45c:43515 {}] regionserver.Replication(171): 692b295ad45c,43515,1731095401835 started 2024-11-08T19:50:04,192 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-08T19:50:04,204 INFO [RS:0;692b295ad45c:45515 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-08T19:50:04,204 INFO [RS:0;692b295ad45c:45515 {}] hbase.ChoreService(168): Chore ScheduledChore name=692b295ad45c,45515,1731095401615-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:04,205 INFO [RS:0;692b295ad45c:45515 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:04,205 INFO [RS:0;692b295ad45c:45515 {}] regionserver.Replication(171): 692b295ad45c,45515,1731095401615 started 2024-11-08T19:50:04,213 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-08T19:50:04,213 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T19:50:04,223 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T19:50:04,224 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-08T19:50:04,238 INFO [RS:2;692b295ad45c:43515 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
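Editor's note: the CompactionConfiguration dump above lists the knobs the selection policy works with, including minFilesToCompact:3, maxFilesToCompact:10 and ratio 1.2. A common way to read the ratio (and the way size-based policies such as the exploring policy are usually described) is that a file qualifies for compaction only if it is no larger than ratio times the combined size of the other files under consideration. The check below is a simplified illustration of that rule under that reading, not the actual ExploringCompactionPolicy code.

```java
import java.util.List;

// Simplified illustration of the "ratio" rule from the CompactionConfiguration above:
// a file qualifies if size(file) <= ratio * sum(sizes of the other files in the window).
public class RatioCheck {
    static boolean withinRatio(List<Long> fileSizes, int index, double ratio) {
        long others = 0;
        for (int i = 0; i < fileSizes.size(); i++) {
            if (i != index) {
                others += fileSizes.get(i);
            }
        }
        return fileSizes.get(index) <= ratio * others;
    }

    public static void main(String[] args) {
        List<Long> sizes = List.of(100L, 40L, 30L, 20L);   // hypothetical store file sizes in MB
        double ratio = 1.2;                                // from the log above
        for (int i = 0; i < sizes.size(); i++) {
            System.out.println(sizes.get(i) + " MB within ratio: " + withinRatio(sizes, i, ratio));
        }
    }
}
```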
2024-11-08T19:50:04,239 INFO [RS:2;692b295ad45c:43515 {}] regionserver.HRegionServer(1482): Serving as 692b295ad45c,43515,1731095401835, RpcServer on 692b295ad45c/172.17.0.2:43515, sessionid=0x1011bff89330003 2024-11-08T19:50:04,240 DEBUG [RS:2;692b295ad45c:43515 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-08T19:50:04,245 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-08T19:50:04,245 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T19:50:04,253 INFO [RS:1;692b295ad45c:33115 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:04,238 INFO [RS:0;692b295ad45c:45515 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:04,256 INFO [RS:0;692b295ad45c:45515 {}] regionserver.HRegionServer(1482): Serving as 692b295ad45c,45515,1731095401615, RpcServer on 692b295ad45c/172.17.0.2:45515, sessionid=0x1011bff89330001 2024-11-08T19:50:04,257 DEBUG [RS:0;692b295ad45c:45515 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-08T19:50:04,257 DEBUG [RS:0;692b295ad45c:45515 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 692b295ad45c,45515,1731095401615 2024-11-08T19:50:04,258 DEBUG [RS:0;692b295ad45c:45515 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '692b295ad45c,45515,1731095401615' 2024-11-08T19:50:04,258 DEBUG [RS:0;692b295ad45c:45515 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-08T19:50:04,260 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T19:50:04,261 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-08T19:50:04,262 DEBUG [RS:2;692b295ad45c:43515 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 692b295ad45c,43515,1731095401835 2024-11-08T19:50:04,263 DEBUG [RS:2;692b295ad45c:43515 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '692b295ad45c,43515,1731095401835' 2024-11-08T19:50:04,263 DEBUG [RS:2;692b295ad45c:43515 {}] 
procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-08T19:50:04,263 INFO [RS:1;692b295ad45c:33115 {}] regionserver.HRegionServer(1482): Serving as 692b295ad45c,33115,1731095401770, RpcServer on 692b295ad45c/172.17.0.2:33115, sessionid=0x1011bff89330002 2024-11-08T19:50:04,264 DEBUG [RS:1;692b295ad45c:33115 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-08T19:50:04,264 DEBUG [RS:1;692b295ad45c:33115 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 692b295ad45c,33115,1731095401770 2024-11-08T19:50:04,264 DEBUG [RS:1;692b295ad45c:33115 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '692b295ad45c,33115,1731095401770' 2024-11-08T19:50:04,264 DEBUG [RS:1;692b295ad45c:33115 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-08T19:50:04,265 DEBUG [RS:2;692b295ad45c:43515 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-08T19:50:04,265 DEBUG [RS:0;692b295ad45c:45515 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-08T19:50:04,269 DEBUG [RS:0;692b295ad45c:45515 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-08T19:50:04,269 DEBUG [RS:1;692b295ad45c:33115 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-08T19:50:04,269 DEBUG [RS:0;692b295ad45c:45515 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-08T19:50:04,269 DEBUG [RS:2;692b295ad45c:43515 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-08T19:50:04,269 DEBUG [RS:0;692b295ad45c:45515 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 692b295ad45c,45515,1731095401615 2024-11-08T19:50:04,269 DEBUG [RS:2;692b295ad45c:43515 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-08T19:50:04,269 DEBUG [RS:0;692b295ad45c:45515 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '692b295ad45c,45515,1731095401615' 2024-11-08T19:50:04,269 DEBUG [RS:2;692b295ad45c:43515 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 692b295ad45c,43515,1731095401835 2024-11-08T19:50:04,269 DEBUG [RS:0;692b295ad45c:45515 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-08T19:50:04,269 DEBUG [RS:2;692b295ad45c:43515 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '692b295ad45c,43515,1731095401835' 2024-11-08T19:50:04,270 DEBUG [RS:2;692b295ad45c:43515 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-08T19:50:04,272 DEBUG [RS:2;692b295ad45c:43515 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-08T19:50:04,273 DEBUG [RS:0;692b295ad45c:45515 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-08T19:50:04,273 DEBUG [RS:1;692b295ad45c:33115 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-08T19:50:04,274 DEBUG 
[RS:1;692b295ad45c:33115 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-08T19:50:04,274 DEBUG [RS:1;692b295ad45c:33115 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 692b295ad45c,33115,1731095401770 2024-11-08T19:50:04,274 DEBUG [RS:1;692b295ad45c:33115 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '692b295ad45c,33115,1731095401770' 2024-11-08T19:50:04,274 DEBUG [RS:1;692b295ad45c:33115 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-08T19:50:04,274 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-08T19:50:04,274 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T19:50:04,276 DEBUG [RS:1;692b295ad45c:33115 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-08T19:50:04,277 DEBUG [RS:1;692b295ad45c:33115 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-08T19:50:04,278 INFO [RS:1;692b295ad45c:33115 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-08T19:50:04,278 INFO [RS:1;692b295ad45c:33115 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-08T19:50:04,279 DEBUG [RS:2;692b295ad45c:43515 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-08T19:50:04,279 INFO [RS:2;692b295ad45c:43515 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-08T19:50:04,279 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T19:50:04,279 INFO [RS:2;692b295ad45c:43515 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
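Editor's note: the flush-table-proc and online-snapshot members above coordinate through znodes such as /hbase/flush-table-proc/acquired and /hbase/online-snapshot/abort. Purely as an illustration of that pattern (standard ZooKeeper client API; the connect string and paths are taken from this log, everything else is assumed), a member-style process can watch the acquired znode for newly offered procedures like this:

```java
import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Illustration of watching a procedure "acquired" znode for new children,
// similar in spirit to the ZKProcedureMemberRpcs messages above.
public class AcquiredZnodeWatcher {
    public static void main(String[] args) throws Exception {
        String quorum = "127.0.0.1:54537";                   // quorum seen in this log
        String acquired = "/hbase/flush-table-proc/acquired";

        Watcher watcher = (WatchedEvent event) ->
            System.out.println("ZooKeeper event: " + event.getType() + " on " + event.getPath());

        ZooKeeper zk = new ZooKeeper(quorum, 30_000, watcher);
        // Registers a watch and lists any procedures currently offered under the znode.
        List<String> procedures = zk.getChildren(acquired, true);
        System.out.println("Procedures under " + acquired + ": " + procedures);
        zk.close();
    }
}
```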
2024-11-08T19:50:04,279 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-08T19:50:04,282 DEBUG [RS:0;692b295ad45c:45515 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-08T19:50:04,282 INFO [RS:0;692b295ad45c:45515 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-08T19:50:04,282 INFO [RS:0;692b295ad45c:45515 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-08T19:50:04,295 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-08T19:50:04,295 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T19:50:04,304 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T19:50:04,306 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-08T19:50:04,309 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/hbase/meta/1588230740 2024-11-08T19:50:04,311 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/hbase/meta/1588230740 2024-11-08T19:50:04,326 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-08T19:50:04,326 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-08T19:50:04,328 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
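Editor's note: the FlushLargeStoresPolicy message above falls back to the memstore flush size divided by the number of column families. With the four hbase:meta families opened in this log (info, ns, rep_barrier, table) and the usual 128 MB region flush size (an assumption; the flush size itself is not printed here), that works out to the 32 MB / 33554432-byte lower bound reported in the next entries.

```java
// Recomputes the per-family flush lower bound from the message above.
// Assumes the default 128 MB memstore flush size; the family count (4) comes from
// the info/ns/rep_barrier/table stores opened for hbase:meta in this log.
public class FlushLowerBoundMath {
    public static void main(String[] args) {
        long memstoreFlushSize = 128L * 1024 * 1024;   // assumed default flush size
        int familyCount = 4;                           // info, ns, rep_barrier, table
        long lowerBound = memstoreFlushSize / familyCount;
        // Prints 33554432 bytes = 32 MB, matching flushSizeLowerBound=33554432.
        System.out.println(lowerBound + " bytes = " + (lowerBound / (1024 * 1024)) + " MB");
    }
}
```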
2024-11-08T19:50:04,351 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-08T19:50:04,363 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T19:50:04,365 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63642526, jitterRate=-0.0516524612903595}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-08T19:50:04,367 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731095404111Initializing all the Stores at 1731095404118 (+7 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731095404118Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731095404188 (+70 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731095404188Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731095404188Cleaning up temporary data from old regions at 1731095404326 (+138 ms)Region opened successfully at 1731095404367 (+41 ms) 2024-11-08T19:50:04,367 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-08T19:50:04,367 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-08T19:50:04,368 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-08T19:50:04,368 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-08T19:50:04,368 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-08T19:50:04,378 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-08T19:50:04,378 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731095404367Disabling compacts and flushes for region at 1731095404367Disabling writes for close at 1731095404368 
(+1 ms)Writing region close event to WAL at 1731095404377 (+9 ms)Closed at 1731095404377 2024-11-08T19:50:04,389 INFO [RS:0;692b295ad45c:45515 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-08T19:50:04,389 INFO [RS:2;692b295ad45c:43515 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-08T19:50:04,390 INFO [RS:1;692b295ad45c:33115 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-08T19:50:04,390 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T19:50:04,391 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-08T19:50:04,393 INFO [RS:0;692b295ad45c:45515 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=692b295ad45c%2C45515%2C1731095401615, suffix=, logDir=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/WALs/692b295ad45c,45515,1731095401615, archiveDir=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/oldWALs, maxLogs=32 2024-11-08T19:50:04,397 INFO [RS:1;692b295ad45c:33115 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=692b295ad45c%2C33115%2C1731095401770, suffix=, logDir=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/WALs/692b295ad45c,33115,1731095401770, archiveDir=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/oldWALs, maxLogs=32 2024-11-08T19:50:04,397 INFO [RS:2;692b295ad45c:43515 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=692b295ad45c%2C43515%2C1731095401835, suffix=, logDir=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/WALs/692b295ad45c,43515,1731095401835, archiveDir=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/oldWALs, maxLogs=32 2024-11-08T19:50:04,399 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-08T19:50:04,420 DEBUG [RS:1;692b295ad45c:33115 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/WALs/692b295ad45c,33115,1731095401770/692b295ad45c%2C33115%2C1731095401770.1731095404401, exclude list is [], retry=0 2024-11-08T19:50:04,426 DEBUG [RS:0;692b295ad45c:45515 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/WALs/692b295ad45c,45515,1731095401615/692b295ad45c%2C45515%2C1731095401615.1731095404396, exclude list is [], retry=0 2024-11-08T19:50:04,426 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-08T19:50:04,427 DEBUG [RS:2;692b295ad45c:43515 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/WALs/692b295ad45c,43515,1731095401835/692b295ad45c%2C43515%2C1731095401835.1731095404402, exclude list is [], 
retry=0 2024-11-08T19:50:04,432 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-08T19:50:04,435 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44041,DS-0c84927a-4568-4e73-89cf-73067e93d1e5,DISK] 2024-11-08T19:50:04,435 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46163,DS-f2d7659d-395a-4749-9f43-e1bfba72294e,DISK] 2024-11-08T19:50:04,437 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35749,DS-91ae1e86-391f-42af-8bfa-692249e3172a,DISK] 2024-11-08T19:50:04,437 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44041,DS-0c84927a-4568-4e73-89cf-73067e93d1e5,DISK] 2024-11-08T19:50:04,438 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35749,DS-91ae1e86-391f-42af-8bfa-692249e3172a,DISK] 2024-11-08T19:50:04,485 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35749,DS-91ae1e86-391f-42af-8bfa-692249e3172a,DISK] 2024-11-08T19:50:04,488 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44041,DS-0c84927a-4568-4e73-89cf-73067e93d1e5,DISK] 2024-11-08T19:50:04,488 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46163,DS-f2d7659d-395a-4749-9f43-e1bfba72294e,DISK] 2024-11-08T19:50:04,489 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46163,DS-f2d7659d-395a-4749-9f43-e1bfba72294e,DISK] 2024-11-08T19:50:04,524 INFO [RS:1;692b295ad45c:33115 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/WALs/692b295ad45c,33115,1731095401770/692b295ad45c%2C33115%2C1731095401770.1731095404401 2024-11-08T19:50:04,525 INFO [RS:2;692b295ad45c:43515 
{}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/WALs/692b295ad45c,43515,1731095401835/692b295ad45c%2C43515%2C1731095401835.1731095404402 2024-11-08T19:50:04,528 DEBUG [RS:1;692b295ad45c:33115 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43939:43939),(127.0.0.1/127.0.0.1:46831:46831),(127.0.0.1/127.0.0.1:32819:32819)] 2024-11-08T19:50:04,529 INFO [RS:0;692b295ad45c:45515 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/WALs/692b295ad45c,45515,1731095401615/692b295ad45c%2C45515%2C1731095401615.1731095404396 2024-11-08T19:50:04,529 DEBUG [RS:2;692b295ad45c:43515 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43939:43939),(127.0.0.1/127.0.0.1:46831:46831),(127.0.0.1/127.0.0.1:32819:32819)] 2024-11-08T19:50:04,531 DEBUG [RS:0;692b295ad45c:45515 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43939:43939),(127.0.0.1/127.0.0.1:32819:32819),(127.0.0.1/127.0.0.1:46831:46831)] 2024-11-08T19:50:04,587 DEBUG [692b295ad45c:45119 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-08T19:50:04,599 DEBUG [692b295ad45c:45119 {}] balancer.BalancerClusterState(204): Hosts are {692b295ad45c=0} racks are {/default-rack=0} 2024-11-08T19:50:04,612 DEBUG [692b295ad45c:45119 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-08T19:50:04,612 DEBUG [692b295ad45c:45119 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-08T19:50:04,612 DEBUG [692b295ad45c:45119 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-08T19:50:04,612 DEBUG [692b295ad45c:45119 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-08T19:50:04,612 DEBUG [692b295ad45c:45119 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-08T19:50:04,612 DEBUG [692b295ad45c:45119 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-08T19:50:04,613 INFO [692b295ad45c:45119 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-08T19:50:04,613 INFO [692b295ad45c:45119 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-08T19:50:04,613 INFO [692b295ad45c:45119 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-08T19:50:04,613 DEBUG [692b295ad45c:45119 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-08T19:50:04,623 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=692b295ad45c,43515,1731095401835 2024-11-08T19:50:04,632 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 692b295ad45c,43515,1731095401835, state=OPENING 2024-11-08T19:50:04,689 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-08T19:50:04,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45515-0x1011bff89330001, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:04,699 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33115-0x1011bff89330002, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-08T19:50:04,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43515-0x1011bff89330003, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:04,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:04,701 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T19:50:04,701 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T19:50:04,701 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T19:50:04,702 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T19:50:04,705 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-08T19:50:04,707 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=692b295ad45c,43515,1731095401835}] 2024-11-08T19:50:04,897 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-08T19:50:04,901 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33643, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-08T19:50:04,932 INFO [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-08T19:50:04,933 INFO [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-08T19:50:04,934 INFO [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-08T19:50:04,947 INFO [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=692b295ad45c%2C43515%2C1731095401835.meta, suffix=.meta, logDir=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/WALs/692b295ad45c,43515,1731095401835, archiveDir=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/oldWALs, maxLogs=32 2024-11-08T19:50:04,969 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/WALs/692b295ad45c,43515,1731095401835/692b295ad45c%2C43515%2C1731095401835.meta.1731095404950.meta, exclude list is [], retry=0 
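Editor's note: the WAL configuration entries above use blocksize=256 MB, rollsize=128 MB and maxLogs=32. Conceptually, the writer rolls to a new WAL file once the current one reaches the roll size, and an excess of un-archived WALs creates pressure to flush so old files can be cleaned up. The snippet below is only a schematic of those two thresholds under that reading, not the AbstractFSWAL logic itself.

```java
// Schematic of the two WAL thresholds shown in the log above:
// roll when the current file reaches rollsize, and flag pressure when the
// number of live WAL files exceeds maxLogs.
public class WalThresholds {
    static final long ROLL_SIZE = 128L * 1024 * 1024;   // rollsize=128 MB (from the log)
    static final int MAX_LOGS = 32;                      // maxLogs=32 (from the log)

    static boolean shouldRoll(long bytesWrittenToCurrentWal) {
        return bytesWrittenToCurrentWal >= ROLL_SIZE;
    }

    static boolean tooManyWals(int liveWalCount) {
        return liveWalCount > MAX_LOGS;
    }

    public static void main(String[] args) {
        System.out.println(shouldRoll(130L * 1024 * 1024));  // true: past 128 MB
        System.out.println(tooManyWals(33));                 // true: more than 32 live WALs
    }
}
```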
2024-11-08T19:50:04,975 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46163,DS-f2d7659d-395a-4749-9f43-e1bfba72294e,DISK] 2024-11-08T19:50:04,975 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35749,DS-91ae1e86-391f-42af-8bfa-692249e3172a,DISK] 2024-11-08T19:50:04,977 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44041,DS-0c84927a-4568-4e73-89cf-73067e93d1e5,DISK] 2024-11-08T19:50:04,993 INFO [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/WALs/692b295ad45c,43515,1731095401835/692b295ad45c%2C43515%2C1731095401835.meta.1731095404950.meta 2024-11-08T19:50:04,996 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46831:46831),(127.0.0.1/127.0.0.1:32819:32819),(127.0.0.1/127.0.0.1:43939:43939)] 2024-11-08T19:50:04,997 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-08T19:50:04,999 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-08T19:50:05,004 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-08T19:50:05,010 INFO [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-08T19:50:05,055 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-08T19:50:05,056 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T19:50:05,056 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-08T19:50:05,057 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-08T19:50:05,064 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-08T19:50:05,068 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-08T19:50:05,068 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T19:50:05,069 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T19:50:05,070 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-08T19:50:05,073 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-08T19:50:05,073 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T19:50:05,075 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T19:50:05,075 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-08T19:50:05,078 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-08T19:50:05,078 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T19:50:05,082 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T19:50:05,082 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-08T19:50:05,085 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-08T19:50:05,085 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T19:50:05,093 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
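Editor's note: the store opener entries above (and the column family dumps in the region open journal that follows) show the hbase:meta families configured with ROW_INDEX_V1 block encoding, ROWCOL bloom filters, in-memory caching and an 8 KB block size. A family with the same settings could be described through the public client API roughly as follows; this is an illustrative sketch, not code taken from HBase itself.

```java
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Builds a column family descriptor mirroring the settings printed for the
// hbase:meta 'info' family in this log (encoding, bloom filter, in-memory, block size).
public class MetaLikeFamily {
    public static void main(String[] args) {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)          // 8192 B, as in the descriptor dump
            .setMaxVersions(3)
            .build();
        System.out.println(info);
    }
}
```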
2024-11-08T19:50:05,093 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-08T19:50:05,095 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/hbase/meta/1588230740 2024-11-08T19:50:05,100 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/hbase/meta/1588230740 2024-11-08T19:50:05,112 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-08T19:50:05,113 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-08T19:50:05,114 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-08T19:50:05,121 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-08T19:50:05,126 INFO [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74662292, jitterRate=0.11255484819412231}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-08T19:50:05,126 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-08T19:50:05,128 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731095405057Writing region info on filesystem at 1731095405057Initializing all the Stores at 1731095405063 (+6 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731095405063Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731095405064 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731095405064Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731095405064Cleaning up temporary data from old regions at 1731095405113 (+49 ms)Running coprocessor post-open hooks at 1731095405126 (+13 ms)Region opened successfully at 1731095405128 (+2 ms) 2024-11-08T19:50:05,135 INFO [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731095404881 2024-11-08T19:50:05,148 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-08T19:50:05,148 INFO [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-08T19:50:05,151 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=692b295ad45c,43515,1731095401835 2024-11-08T19:50:05,153 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 692b295ad45c,43515,1731095401835, state=OPEN 2024-11-08T19:50:05,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45515-0x1011bff89330001, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T19:50:05,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43515-0x1011bff89330003, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T19:50:05,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T19:50:05,205 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33115-0x1011bff89330002, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T19:50:05,205 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T19:50:05,205 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T19:50:05,205 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T19:50:05,205 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T19:50:05,205 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=692b295ad45c,43515,1731095401835 2024-11-08T19:50:05,214 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-08T19:50:05,214 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=692b295ad45c,43515,1731095401835 in 498 msec 2024-11-08T19:50:05,223 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-08T19:50:05,223 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 818 msec 2024-11-08T19:50:05,226 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T19:50:05,226 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-08T19:50:05,261 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-08T19:50:05,262 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=692b295ad45c,43515,1731095401835, seqNum=-1] 2024-11-08T19:50:05,282 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T19:50:05,285 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36833, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-08T19:50:05,307 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.7540 sec 2024-11-08T19:50:05,307 INFO [master/692b295ad45c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731095405307, completionTime=-1 2024-11-08T19:50:05,311 INFO [master/692b295ad45c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-08T19:50:05,311 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 
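Editor's note: the entries above show the client side of meta discovery, with ConnectionUtils fetching the hbase:meta region location from the registry and getting back region=hbase:meta,,1.1588230740 on 692b295ad45c,43515. From application code, the same location can be asked for through the public client API along these lines; this is a hedged sketch in which the ZooKeeper settings are assumptions based on the quorum port seen in this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

// Asks the cluster where hbase:meta currently lives, mirroring the
// "fetched meta region location" entry above. Assumes the configuration below
// (or hbase-site.xml on the classpath) points at the right ZooKeeper quorum.
public class WhereIsMeta {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");          // assumed for this sketch
        conf.set("hbase.zookeeper.property.clientPort", "54537"); // port seen in this log

        try (Connection connection = ConnectionFactory.createConnection(conf);
             RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
            HRegionLocation location = locator.getRegionLocation(new byte[0]);
            System.out.println("hbase:meta is on " + location.getServerName());
        }
    }
}
```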
2024-11-08T19:50:05,339 INFO [master/692b295ad45c:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-08T19:50:05,339 INFO [master/692b295ad45c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731095465339 2024-11-08T19:50:05,339 INFO [master/692b295ad45c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731095525339 2024-11-08T19:50:05,339 INFO [master/692b295ad45c:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 28 msec 2024-11-08T19:50:05,341 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-08T19:50:05,348 INFO [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=692b295ad45c,45119,1731095400739-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:05,349 INFO [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=692b295ad45c,45119,1731095400739-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:05,349 INFO [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=692b295ad45c,45119,1731095400739-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:05,351 INFO [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-692b295ad45c:45119, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:05,351 INFO [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:05,356 INFO [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:05,359 DEBUG [master/692b295ad45c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-08T19:50:05,389 INFO [master/692b295ad45c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 3.277sec 2024-11-08T19:50:05,391 INFO [master/692b295ad45c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-08T19:50:05,393 INFO [master/692b295ad45c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-08T19:50:05,394 INFO [master/692b295ad45c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-08T19:50:05,394 INFO [master/692b295ad45c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-08T19:50:05,395 INFO [master/692b295ad45c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-08T19:50:05,396 INFO [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=692b295ad45c,45119,1731095400739-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T19:50:05,396 INFO [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=692b295ad45c,45119,1731095400739-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-08T19:50:05,401 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-08T19:50:05,403 INFO [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-08T19:50:05,403 INFO [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=692b295ad45c,45119,1731095400739-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:05,424 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@106eafae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T19:50:05,430 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-08T19:50:05,430 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-08T19:50:05,434 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 692b295ad45c,45119,-1 for getting cluster id 2024-11-08T19:50:05,438 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-08T19:50:05,450 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f9d5de49-8343-46a5-9c3d-7ebb82a45b7d' 2024-11-08T19:50:05,454 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-08T19:50:05,454 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f9d5de49-8343-46a5-9c3d-7ebb82a45b7d" 2024-11-08T19:50:05,455 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@630223d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T19:50:05,455 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [692b295ad45c,45119,-1] 2024-11-08T19:50:05,459 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-08T19:50:05,462 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T19:50:05,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46163 is added to blk_-9223372036854775757_1006 (size=196) 2024-11-08T19:50:05,466 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46163 is added to blk_-9223372036854775725_1010 (size=34) 2024-11-08T19:50:05,467 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56544, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-08T19:50:05,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44041 is added to blk_-9223372036854775773_1004 (size=42) 2024-11-08T19:50:05,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35749 is added to blk_-9223372036854775772_1004 (size=42) 2024-11-08T19:50:05,471 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@527aea6b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T19:50:05,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44041 is added to blk_-9223372036854775756_1006 (size=196) 2024-11-08T19:50:05,472 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-08T19:50:05,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44041 is added to blk_-9223372036854775724_1010 (size=34) 2024-11-08T19:50:05,489 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=692b295ad45c,43515,1731095401835, seqNum=-1] 2024-11-08T19:50:05,490 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T19:50:05,496 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37040, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-08T19:50:05,532 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=692b295ad45c,45119,1731095400739 2024-11-08T19:50:05,539 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-08T19:50:05,545 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 692b295ad45c,45119,1731095400739 2024-11-08T19:50:05,548 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3837b8ab 2024-11-08T19:50:05,549 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-08T19:50:05,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35749 is added to blk_-9223372036854775741_1008 (size=1189) 2024-11-08T19:50:05,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44041 is added to blk_-9223372036854775740_1008 (size=1189) 2024-11-08T19:50:05,558 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56546, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=MasterService 2024-11-08T19:50:05,566 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45119 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-08T19:50:05,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45119 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-08T19:50:05,583 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-08T19:50:05,585 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T19:50:05,589 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45119 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-08T19:50:05,593 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-08T19:50:05,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45119 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-08T19:50:05,625 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T19:50:05,625 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T19:50:05,632 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1254849551_22 at /127.0.0.1:50258 [Receiving block BP-947710928-172.17.0.2-1731095395196:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:46163:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50258 dst: /127.0.0.1:46163 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T19:50:05,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46163 is added to blk_-9223372036854775680_1021 (size=392) 2024-11-08T19:50:05,642 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-08T19:50:05,653 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6553748716513852e3c3b74d7c09b980, NAME => 'TestHBaseWalOnEC,,1731095405560.6553748716513852e3c3b74d7c09b980.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a 2024-11-08T19:50:05,661 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T19:50:05,661 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T19:50:05,675 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1254849551_22 at /127.0.0.1:50272 [Receiving block BP-947710928-172.17.0.2-1731095395196:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:46163:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50272 dst: /127.0.0.1:46163 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T19:50:05,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46163 is added to blk_-9223372036854775664_1023 (size=51) 2024-11-08T19:50:05,692 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-08T19:50:05,692 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731095405560.6553748716513852e3c3b74d7c09b980.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T19:50:05,693 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 6553748716513852e3c3b74d7c09b980, disabling compactions & flushes 2024-11-08T19:50:05,693 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731095405560.6553748716513852e3c3b74d7c09b980. 2024-11-08T19:50:05,693 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731095405560.6553748716513852e3c3b74d7c09b980. 2024-11-08T19:50:05,693 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731095405560.6553748716513852e3c3b74d7c09b980. after waiting 0 ms 2024-11-08T19:50:05,693 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731095405560.6553748716513852e3c3b74d7c09b980. 2024-11-08T19:50:05,693 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731095405560.6553748716513852e3c3b74d7c09b980. 
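The repeated "Cannot allocate parity block(index=3 / index=4, policy=RS-3-2-1024k)" warnings and the "Block group <1> failed to write 2 blocks" messages follow from the cluster shape: RS-3-2-1024k stripes each block group into three data blocks plus two parity blocks, so a full stripe needs at least five datanodes, while the block reports above show only three (127.0.0.1:46163, 127.0.0.1:44041, 127.0.0.1:35749). The three data cells can be placed, the two parity cells (indexes 3 and 4) cannot, hence the "high risk of losing data" warning; the log itself points at 'hdfs ec -verifyClusterSetup' for diagnosing this. Below is a minimal sketch, assuming the standard Hadoop 3.x DistributedFileSystem API (the namenode port is taken from the hdfs://localhost:34095 paths above, the directory is illustrative), of how a test could inspect the effective policy on its data directory:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcPolicyCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Namenode address as in the paths above; the directory below is illustrative, not the test's exact layout.
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:34095/"), conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      Path dir = new Path("/user/jenkins/test-data");
      // Returns the policy set on (or inherited by) the directory; null means plain replication.
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
      if (policy == null) {
        System.out.println(dir + " uses plain replication");
      } else {
        // For RS-3-2-1024k this prints 3 data + 2 parity = 5 datanodes needed per block group.
        System.out.println(dir + " uses " + policy.getName() + ", needs "
            + (policy.getNumDataUnits() + policy.getNumParityUnits()) + " datanodes per block group");
      }
    }
  }
}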
2024-11-08T19:50:05,693 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6553748716513852e3c3b74d7c09b980: Waiting for close lock at 1731095405692Disabling compacts and flushes for region at 1731095405692Disabling writes for close at 1731095405693 (+1 ms)Writing region close event to WAL at 1731095405693Closed at 1731095405693 2024-11-08T19:50:05,696 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-08T19:50:05,703 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731095405560.6553748716513852e3c3b74d7c09b980.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731095405697"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731095405697"}]},"ts":"1731095405697"} 2024-11-08T19:50:05,714 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-08T19:50:05,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45119 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-08T19:50:05,719 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-08T19:50:05,724 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731095405720"}]},"ts":"1731095405720"} 2024-11-08T19:50:05,733 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-08T19:50:05,734 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {692b295ad45c=0} racks are {/default-rack=0} 2024-11-08T19:50:05,741 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-08T19:50:05,741 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-08T19:50:05,741 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-08T19:50:05,741 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-08T19:50:05,741 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-08T19:50:05,741 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-08T19:50:05,741 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-08T19:50:05,742 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-08T19:50:05,742 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-08T19:50:05,742 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-08T19:50:05,744 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=6553748716513852e3c3b74d7c09b980, ASSIGN}] 2024-11-08T19:50:05,748 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=TestHBaseWalOnEC, region=6553748716513852e3c3b74d7c09b980, ASSIGN 2024-11-08T19:50:05,757 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=6553748716513852e3c3b74d7c09b980, ASSIGN; state=OFFLINE, location=692b295ad45c,43515,1731095401835; forceNewPlan=false, retain=false 2024-11-08T19:50:05,912 INFO [692b295ad45c:45119 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-08T19:50:05,913 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6553748716513852e3c3b74d7c09b980, regionState=OPENING, regionLocation=692b295ad45c,43515,1731095401835 2024-11-08T19:50:05,921 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=6553748716513852e3c3b74d7c09b980, ASSIGN because future has completed 2024-11-08T19:50:05,923 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6553748716513852e3c3b74d7c09b980, server=692b295ad45c,43515,1731095401835}] 2024-11-08T19:50:05,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45119 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-08T19:50:06,092 INFO [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731095405560.6553748716513852e3c3b74d7c09b980. 
2024-11-08T19:50:06,093 DEBUG [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 6553748716513852e3c3b74d7c09b980, NAME => 'TestHBaseWalOnEC,,1731095405560.6553748716513852e3c3b74d7c09b980.', STARTKEY => '', ENDKEY => ''} 2024-11-08T19:50:06,093 DEBUG [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 6553748716513852e3c3b74d7c09b980 2024-11-08T19:50:06,093 DEBUG [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731095405560.6553748716513852e3c3b74d7c09b980.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T19:50:06,094 DEBUG [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 6553748716513852e3c3b74d7c09b980 2024-11-08T19:50:06,094 DEBUG [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 6553748716513852e3c3b74d7c09b980 2024-11-08T19:50:06,100 INFO [StoreOpener-6553748716513852e3c3b74d7c09b980-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6553748716513852e3c3b74d7c09b980 2024-11-08T19:50:06,105 INFO [StoreOpener-6553748716513852e3c3b74d7c09b980-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6553748716513852e3c3b74d7c09b980 columnFamilyName cf 2024-11-08T19:50:06,105 DEBUG [StoreOpener-6553748716513852e3c3b74d7c09b980-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T19:50:06,107 INFO [StoreOpener-6553748716513852e3c3b74d7c09b980-1 {}] regionserver.HStore(327): Store=6553748716513852e3c3b74d7c09b980/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T19:50:06,107 DEBUG [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 6553748716513852e3c3b74d7c09b980 2024-11-08T19:50:06,109 DEBUG [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/default/TestHBaseWalOnEC/6553748716513852e3c3b74d7c09b980 2024-11-08T19:50:06,110 DEBUG 
[RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/default/TestHBaseWalOnEC/6553748716513852e3c3b74d7c09b980 2024-11-08T19:50:06,111 DEBUG [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 6553748716513852e3c3b74d7c09b980 2024-11-08T19:50:06,111 DEBUG [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 6553748716513852e3c3b74d7c09b980 2024-11-08T19:50:06,118 DEBUG [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 6553748716513852e3c3b74d7c09b980 2024-11-08T19:50:06,131 DEBUG [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/default/TestHBaseWalOnEC/6553748716513852e3c3b74d7c09b980/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T19:50:06,132 INFO [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 6553748716513852e3c3b74d7c09b980; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73375537, jitterRate=0.09338070452213287}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-08T19:50:06,133 DEBUG [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6553748716513852e3c3b74d7c09b980 2024-11-08T19:50:06,134 DEBUG [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 6553748716513852e3c3b74d7c09b980: Running coprocessor pre-open hook at 1731095406094Writing region info on filesystem at 1731095406094Initializing all the Stores at 1731095406096 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731095406096Cleaning up temporary data from old regions at 1731095406111 (+15 ms)Running coprocessor post-open hooks at 1731095406133 (+22 ms)Region opened successfully at 1731095406134 (+1 ms) 2024-11-08T19:50:06,141 INFO [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731095405560.6553748716513852e3c3b74d7c09b980., pid=6, masterSystemTime=1731095406081 2024-11-08T19:50:06,146 DEBUG [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731095405560.6553748716513852e3c3b74d7c09b980. 2024-11-08T19:50:06,146 INFO [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731095405560.6553748716513852e3c3b74d7c09b980. 
2024-11-08T19:50:06,157 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6553748716513852e3c3b74d7c09b980, regionState=OPEN, openSeqNum=2, regionLocation=692b295ad45c,43515,1731095401835 2024-11-08T19:50:06,163 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6553748716513852e3c3b74d7c09b980, server=692b295ad45c,43515,1731095401835 because future has completed 2024-11-08T19:50:06,193 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-08T19:50:06,193 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 6553748716513852e3c3b74d7c09b980, server=692b295ad45c,43515,1731095401835 in 250 msec 2024-11-08T19:50:06,202 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-08T19:50:06,202 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=6553748716513852e3c3b74d7c09b980, ASSIGN in 449 msec 2024-11-08T19:50:06,205 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-08T19:50:06,205 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731095406205"}]},"ts":"1731095406205"} 2024-11-08T19:50:06,210 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-08T19:50:06,212 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-08T19:50:06,217 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 641 msec 2024-11-08T19:50:06,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45119 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-08T19:50:06,237 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-08T19:50:06,240 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-08T19:50:06,242 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T19:50:06,250 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-08T19:50:06,250 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T19:50:06,251 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
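The CreateTableProcedure chain above (pid=4 with its ASSIGN child pid=5 and OpenRegionProcedure pid=6) is what the master runs for an ordinary admin createTable call: 'TestHBaseWalOnEC' with a single column family 'cf' whose attributes in the pid=4 descriptor are all defaults. A minimal client-side sketch of such a call, assuming the standard HBase client API rather than the exact code of TestHBaseWalOnEC:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName name = TableName.valueOf("TestHBaseWalOnEC");
      // Single column family 'cf' with default attributes, matching the descriptor logged for pid=4.
      admin.createTable(TableDescriptorBuilder.newBuilder(name)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf")))
          .build());
    }
  }
}

The recurring MasterRpcServices(1377) "Checking to see if procedure is done pid=4" lines are the client polling the master until that procedure reaches SUCCESS, at which point the "Operation: CREATE ... completed" line is emitted.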
2024-11-08T19:50:06,268 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731095405560.6553748716513852e3c3b74d7c09b980., hostname=692b295ad45c,43515,1731095401835, seqNum=2] 2024-11-08T19:50:06,286 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45119 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-11-08T19:50:06,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45119 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-08T19:50:06,297 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-08T19:50:06,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45119 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-08T19:50:06,302 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-08T19:50:06,306 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-08T19:50:06,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45119 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-08T19:50:06,478 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43515 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-08T19:50:06,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731095405560.6553748716513852e3c3b74d7c09b980. 2024-11-08T19:50:06,485 INFO [RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 6553748716513852e3c3b74d7c09b980 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-08T19:50:06,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/default/TestHBaseWalOnEC/6553748716513852e3c3b74d7c09b980/.tmp/cf/c1f4afdeab9743279aa7d770fd0db389 is 36, key is row/cf:cq/1731095406271/Put/seqid=0 2024-11-08T19:50:06,610 WARN [RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-08T19:50:06,611 WARN [RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T19:50:06,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45119 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-08T19:50:06,620 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_21799382_22 at /127.0.0.1:41678 [Receiving block BP-947710928-172.17.0.2-1731095395196:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:44041:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41678 dst: /127.0.0.1:44041 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T19:50:06,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44041 is added to blk_-9223372036854775648_1025 (size=4787) 2024-11-08T19:50:06,631 WARN [RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-08T19:50:06,631 INFO [RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/default/TestHBaseWalOnEC/6553748716513852e3c3b74d7c09b980/.tmp/cf/c1f4afdeab9743279aa7d770fd0db389 2024-11-08T19:50:06,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/default/TestHBaseWalOnEC/6553748716513852e3c3b74d7c09b980/.tmp/cf/c1f4afdeab9743279aa7d770fd0db389 as hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/default/TestHBaseWalOnEC/6553748716513852e3c3b74d7c09b980/cf/c1f4afdeab9743279aa7d770fd0db389 2024-11-08T19:50:06,697 INFO [RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/default/TestHBaseWalOnEC/6553748716513852e3c3b74d7c09b980/cf/c1f4afdeab9743279aa7d770fd0db389, entries=1, sequenceid=5, filesize=4.7 K 2024-11-08T19:50:06,704 INFO [RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 6553748716513852e3c3b74d7c09b980 in 219ms, sequenceid=5, compaction requested=false 2024-11-08T19:50:06,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-11-08T19:50:06,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 6553748716513852e3c3b74d7c09b980: 2024-11-08T19:50:06,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731095405560.6553748716513852e3c3b74d7c09b980. 
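The flush above is the server side of a single client write followed by an admin-requested flush: the HFileWriterImpl line records the only cell's key as "row/cf:cq", and pid=7/pid=8 are the FlushTableProcedure and its per-region FlushRegionProcedure. A minimal sketch with the standard HBase client API (the cell value is not visible in the log, so the payload below is only a placeholder):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAndFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName name = TableName.valueOf("TestHBaseWalOnEC");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(name);
         Admin admin = conn.getAdmin()) {
      // Row, family and qualifier match the cell key "row/cf:cq" seen in the flush log above;
      // the value is a placeholder, the log only records the resulting data/heap sizes.
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
      // Forces the memstore out to an HFile on the region server, which is what pid=7/pid=8 did above.
      admin.flush(name);
    }
  }
}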
2024-11-08T19:50:06,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-08T19:50:06,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45119 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-08T19:50:06,719 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-08T19:50:06,719 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 408 msec 2024-11-08T19:50:06,724 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 433 msec 2024-11-08T19:50:06,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45119 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-08T19:50:06,926 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-08T19:50:06,941 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-08T19:50:06,942 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-08T19:50:06,942 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T19:50:06,949 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T19:50:06,949 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T19:50:06,949 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-08T19:50:06,950 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-08T19:50:06,950 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1407995813, stopped=false 2024-11-08T19:50:06,950 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=692b295ad45c,45119,1731095400739 2024-11-08T19:50:06,973 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33115-0x1011bff89330002, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T19:50:06,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43515-0x1011bff89330003, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T19:50:06,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45515-0x1011bff89330001, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T19:50:06,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T19:50:06,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43515-0x1011bff89330003, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:06,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45119-0x1011bff89330000, 
quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:06,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45515-0x1011bff89330001, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:06,973 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-08T19:50:06,973 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33115-0x1011bff89330002, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:06,974 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T19:50:06,974 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33115-0x1011bff89330002, quorum=127.0.0.1:54537, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T19:50:06,974 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43515-0x1011bff89330003, quorum=127.0.0.1:54537, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T19:50:06,975 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45515-0x1011bff89330001, quorum=127.0.0.1:54537, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T19:50:06,975 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-08T19:50:06,975 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T19:50:06,975 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T19:50:06,976 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '692b295ad45c,45515,1731095401615' ***** 2024-11-08T19:50:06,976 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-08T19:50:06,976 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '692b295ad45c,33115,1731095401770' ***** 2024-11-08T19:50:06,976 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-08T19:50:06,976 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '692b295ad45c,43515,1731095401835' ***** 2024-11-08T19:50:06,976 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-08T19:50:06,976 INFO [RS:2;692b295ad45c:43515 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-08T19:50:06,977 INFO [RS:2;692b295ad45c:43515 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-08T19:50:06,977 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-08T19:50:06,977 INFO [RS:0;692b295ad45c:45515 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-08T19:50:06,977 INFO [RS:2;692b295ad45c:43515 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-08T19:50:06,977 INFO [RS:1;692b295ad45c:33115 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-08T19:50:06,977 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-08T19:50:06,977 INFO [RS:1;692b295ad45c:33115 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-08T19:50:06,977 INFO [RS:1;692b295ad45c:33115 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-08T19:50:06,977 INFO [RS:0;692b295ad45c:45515 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-08T19:50:06,977 INFO [RS:1;692b295ad45c:33115 {}] regionserver.HRegionServer(959): stopping server 692b295ad45c,33115,1731095401770 2024-11-08T19:50:06,977 INFO [RS:0;692b295ad45c:45515 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-08T19:50:06,977 INFO [RS:1;692b295ad45c:33115 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T19:50:06,977 INFO [RS:2;692b295ad45c:43515 {}] regionserver.HRegionServer(3091): Received CLOSE for 6553748716513852e3c3b74d7c09b980 2024-11-08T19:50:06,977 INFO [RS:0;692b295ad45c:45515 {}] regionserver.HRegionServer(959): stopping server 692b295ad45c,45515,1731095401615 2024-11-08T19:50:06,977 INFO [RS:0;692b295ad45c:45515 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T19:50:06,978 INFO [RS:1;692b295ad45c:33115 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;692b295ad45c:33115. 2024-11-08T19:50:06,978 INFO [RS:0;692b295ad45c:45515 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;692b295ad45c:45515. 
2024-11-08T19:50:06,978 DEBUG [RS:1;692b295ad45c:33115 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T19:50:06,978 DEBUG [RS:1;692b295ad45c:33115 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T19:50:06,978 DEBUG [RS:0;692b295ad45c:45515 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T19:50:06,978 DEBUG [RS:0;692b295ad45c:45515 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T19:50:06,978 INFO [RS:1;692b295ad45c:33115 {}] regionserver.HRegionServer(976): stopping server 692b295ad45c,33115,1731095401770; all regions closed. 2024-11-08T19:50:06,978 INFO [RS:0;692b295ad45c:45515 {}] regionserver.HRegionServer(976): stopping server 692b295ad45c,45515,1731095401615; all regions closed. 
2024-11-08T19:50:06,978 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-08T19:50:06,979 INFO [RS:2;692b295ad45c:43515 {}] regionserver.HRegionServer(959): stopping server 692b295ad45c,43515,1731095401835 2024-11-08T19:50:06,979 INFO [RS:2;692b295ad45c:43515 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T19:50:06,979 INFO [RS:2;692b295ad45c:43515 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;692b295ad45c:43515. 2024-11-08T19:50:06,979 DEBUG [RS:2;692b295ad45c:43515 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T19:50:06,979 DEBUG [RS:2;692b295ad45c:43515 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T19:50:06,979 INFO [RS:2;692b295ad45c:43515 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-08T19:50:06,980 INFO [RS:2;692b295ad45c:43515 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-08T19:50:06,980 INFO [RS:2;692b295ad45c:43515 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-08T19:50:06,980 INFO [RS:2;692b295ad45c:43515 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-08T19:50:06,980 DEBUG [RS_CLOSE_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 6553748716513852e3c3b74d7c09b980, disabling compactions & flushes 2024-11-08T19:50:06,980 INFO [RS_CLOSE_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731095405560.6553748716513852e3c3b74d7c09b980. 2024-11-08T19:50:06,980 DEBUG [RS_CLOSE_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731095405560.6553748716513852e3c3b74d7c09b980. 2024-11-08T19:50:06,980 DEBUG [RS_CLOSE_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731095405560.6553748716513852e3c3b74d7c09b980. 
after waiting 0 ms 2024-11-08T19:50:06,980 INFO [RS:2;692b295ad45c:43515 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-08T19:50:06,980 DEBUG [RS_CLOSE_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731095405560.6553748716513852e3c3b74d7c09b980. 2024-11-08T19:50:06,981 DEBUG [RS:2;692b295ad45c:43515 {}] regionserver.HRegionServer(1325): Online Regions={6553748716513852e3c3b74d7c09b980=TestHBaseWalOnEC,,1731095405560.6553748716513852e3c3b74d7c09b980., 1588230740=hbase:meta,,1.1588230740} 2024-11-08T19:50:06,981 DEBUG [RS:2;692b295ad45c:43515 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 6553748716513852e3c3b74d7c09b980 2024-11-08T19:50:06,982 DEBUG [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-08T19:50:06,982 INFO [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-08T19:50:06,982 DEBUG [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-08T19:50:06,982 DEBUG [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-08T19:50:06,983 DEBUG [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-08T19:50:06,983 INFO [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-08T19:50:06,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46163 is added to blk_1073741828_1018 (size=93) 2024-11-08T19:50:06,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35749 is added to blk_1073741828_1018 (size=93) 2024-11-08T19:50:06,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44041 is added to blk_1073741828_1018 (size=93) 2024-11-08T19:50:06,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46163 is added to blk_1073741826_1016 (size=93) 2024-11-08T19:50:06,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35749 is added to blk_1073741826_1016 (size=93) 2024-11-08T19:50:06,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44041 is added to blk_1073741826_1016 (size=93) 2024-11-08T19:50:06,998 INFO [regionserver/692b295ad45c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T19:50:07,002 DEBUG [RS:0;692b295ad45c:45515 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/oldWALs 2024-11-08T19:50:07,002 INFO [RS:0;692b295ad45c:45515 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 692b295ad45c%2C45515%2C1731095401615:(num 1731095404396) 2024-11-08T19:50:07,002 DEBUG [RS:0;692b295ad45c:45515 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T19:50:07,002 INFO 
[RS:0;692b295ad45c:45515 {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T19:50:07,002 DEBUG [RS:1;692b295ad45c:33115 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/oldWALs 2024-11-08T19:50:07,002 INFO [RS:0;692b295ad45c:45515 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T19:50:07,002 INFO [RS:1;692b295ad45c:33115 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 692b295ad45c%2C33115%2C1731095401770:(num 1731095404401) 2024-11-08T19:50:07,002 DEBUG [RS:1;692b295ad45c:33115 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T19:50:07,002 INFO [RS:1;692b295ad45c:33115 {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T19:50:07,002 INFO [RS:1;692b295ad45c:33115 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T19:50:07,002 INFO [RS:0;692b295ad45c:45515 {}] hbase.ChoreService(370): Chore service for: regionserver/692b295ad45c:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-08T19:50:07,003 INFO [RS:1;692b295ad45c:33115 {}] hbase.ChoreService(370): Chore service for: regionserver/692b295ad45c:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-08T19:50:07,003 INFO [RS:1;692b295ad45c:33115 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-08T19:50:07,003 INFO [regionserver/692b295ad45c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-08T19:50:07,003 INFO [RS:1;692b295ad45c:33115 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-08T19:50:07,003 INFO [RS:1;692b295ad45c:33115 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-08T19:50:07,003 INFO [RS:1;692b295ad45c:33115 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T19:50:07,003 INFO [RS:0;692b295ad45c:45515 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-08T19:50:07,003 INFO [RS:0;692b295ad45c:45515 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-08T19:50:07,003 INFO [RS:0;692b295ad45c:45515 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-08T19:50:07,003 INFO [RS:0;692b295ad45c:45515 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T19:50:07,004 INFO [RS:0;692b295ad45c:45515 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45515 2024-11-08T19:50:07,004 INFO [RS:1;692b295ad45c:33115 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33115 2024-11-08T19:50:07,004 INFO [regionserver/692b295ad45c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-08T19:50:07,006 INFO [regionserver/692b295ad45c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T19:50:07,015 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33115-0x1011bff89330002, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/692b295ad45c,33115,1731095401770 2024-11-08T19:50:07,015 INFO [RS:1;692b295ad45c:33115 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T19:50:07,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T19:50:07,017 DEBUG [RS_CLOSE_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/default/TestHBaseWalOnEC/6553748716513852e3c3b74d7c09b980/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-08T19:50:07,020 INFO [RS_CLOSE_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731095405560.6553748716513852e3c3b74d7c09b980. 2024-11-08T19:50:07,020 DEBUG [RS_CLOSE_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 6553748716513852e3c3b74d7c09b980: Waiting for close lock at 1731095406979Running coprocessor pre-close hooks at 1731095406980 (+1 ms)Disabling compacts and flushes for region at 1731095406980Disabling writes for close at 1731095406980Writing region close event to WAL at 1731095406991 (+11 ms)Running coprocessor post-close hooks at 1731095407019 (+28 ms)Closed at 1731095407020 (+1 ms) 2024-11-08T19:50:07,021 DEBUG [RS_CLOSE_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731095405560.6553748716513852e3c3b74d7c09b980. 2024-11-08T19:50:07,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45515-0x1011bff89330001, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/692b295ad45c,45515,1731095401615 2024-11-08T19:50:07,026 INFO [RS:0;692b295ad45c:45515 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T19:50:07,036 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [692b295ad45c,33115,1731095401770] 2024-11-08T19:50:07,039 DEBUG [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/hbase/meta/1588230740/.tmp/info/76aa0ebb88ea473a87c975447b9dd3d9 is 153, key is TestHBaseWalOnEC,,1731095405560.6553748716513852e3c3b74d7c09b980./info:regioninfo/1731095406156/Put/seqid=0 2024-11-08T19:50:07,043 WARN [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-08T19:50:07,043 WARN [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T19:50:07,052 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_21799382_22 at /127.0.0.1:41694 [Receiving block BP-947710928-172.17.0.2-1731095395196:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:44041:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41694 dst: /127.0.0.1:44041 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T19:50:07,057 INFO [regionserver/692b295ad45c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T19:50:07,057 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/692b295ad45c,33115,1731095401770 already deleted, retry=false 2024-11-08T19:50:07,057 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 692b295ad45c,33115,1731095401770 expired; onlineServers=2 2024-11-08T19:50:07,057 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [692b295ad45c,45515,1731095401615] 2024-11-08T19:50:07,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44041 is added to blk_-9223372036854775632_1027 (size=6637) 2024-11-08T19:50:07,060 WARN [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
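The "Cannot allocate parity block" warnings and the "Block group <1> failed to write 2 blocks" message follow directly from the RS-3-2-1024k policy: each striped block group needs 3 data + 2 parity = 5 distinct datanodes, while only three datanodes (127.0.0.1:46163, 127.0.0.1:35749, 127.0.0.1:44041) report blocks in this run, so the two parity blocks have nowhere to go. Besides the 'hdfs ec -verifyClusterSetup' command the warning itself suggests, a rough programmatic check is possible; the sketch below is a minimal, hypothetical example assuming the Hadoop 3.x client API (DistributedFileSystem#getErasureCodingPolicy and #getDataNodeStats in these forms), with a placeholder namenode URI and path rather than anything taken from this test.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants;

    // Rough sanity check: does the cluster have at least as many live datanodes
    // as the EC policy on a directory needs (data units + parity units)?
    public class EcWidthCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder namenode address and path; adjust for a real cluster.
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"), conf)) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          Path dir = new Path("/hbase");
          ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
          if (policy == null) {
            System.out.println(dir + ": no erasure coding policy, plain replication");
            return;
          }
          int needed = policy.getNumDataUnits() + policy.getNumParityUnits();
          DatanodeInfo[] live = dfs.getDataNodeStats(HdfsConstants.DatanodeReportType.LIVE);
          System.out.printf("policy=%s needs %d datanodes, cluster has %d live%n",
              policy.getName(), needed, live.length);
          if (live.length < needed) {
            // Same situation as the warnings above: with RS-3-2-1024k and only
            // three datanodes, 2 of the 5 blocks in each group cannot be placed.
            System.out.println("Not enough datanodes for a full block group; "
                + "'hdfs ec -verifyClusterSetup' gives the authoritative answer.");
          }
        }
      }
    }

This only checks block-group width; rack placement and decommissioning nodes also matter, which is why the CLI check quoted in the warning remains the safer tool.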
2024-11-08T19:50:07,061 INFO [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/hbase/meta/1588230740/.tmp/info/76aa0ebb88ea473a87c975447b9dd3d9 2024-11-08T19:50:07,067 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/692b295ad45c,45515,1731095401615 already deleted, retry=false 2024-11-08T19:50:07,067 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 692b295ad45c,45515,1731095401615 expired; onlineServers=1 2024-11-08T19:50:07,105 DEBUG [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/hbase/meta/1588230740/.tmp/ns/e2d6ee072b26408e8d905ba558de857e is 43, key is default/ns:d/1731095405289/Put/seqid=0 2024-11-08T19:50:07,115 WARN [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T19:50:07,116 WARN [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T19:50:07,124 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_21799382_22 at /127.0.0.1:50304 [Receiving block BP-947710928-172.17.0.2-1731095395196:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:46163:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50304 dst: /127.0.0.1:46163 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-08T19:50:07,135 INFO [regionserver/692b295ad45c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-08T19:50:07,135 INFO [regionserver/692b295ad45c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-08T19:50:07,137 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33115-0x1011bff89330002, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T19:50:07,137 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33115-0x1011bff89330002, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T19:50:07,137 INFO [RS:1;692b295ad45c:33115 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T19:50:07,138 INFO [RS:1;692b295ad45c:33115 {}] regionserver.HRegionServer(1031): Exiting; stopping=692b295ad45c,33115,1731095401770; zookeeper connection closed. 2024-11-08T19:50:07,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46163 is added to blk_-9223372036854775616_1029 (size=5153) 2024-11-08T19:50:07,140 WARN [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-08T19:50:07,140 INFO [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/hbase/meta/1588230740/.tmp/ns/e2d6ee072b26408e8d905ba558de857e 2024-11-08T19:50:07,144 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@96259a4 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@96259a4 2024-11-08T19:50:07,147 INFO [RS:0;692b295ad45c:45515 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T19:50:07,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45515-0x1011bff89330001, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T19:50:07,147 INFO [RS:0;692b295ad45c:45515 {}] regionserver.HRegionServer(1031): Exiting; stopping=692b295ad45c,45515,1731095401615; zookeeper connection closed. 
2024-11-08T19:50:07,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45515-0x1011bff89330001, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T19:50:07,152 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2ff37f83 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2ff37f83 2024-11-08T19:50:07,181 DEBUG [RS:2;692b295ad45c:43515 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-08T19:50:07,193 DEBUG [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/hbase/meta/1588230740/.tmp/table/94a7c83d80114403bbef1bba5ca37a30 is 52, key is TestHBaseWalOnEC/table:state/1731095406205/Put/seqid=0 2024-11-08T19:50:07,196 WARN [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T19:50:07,196 WARN [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T19:50:07,209 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_21799382_22 at /127.0.0.1:42420 [Receiving block BP-947710928-172.17.0.2-1731095395196:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:35749:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42420 dst: /127.0.0.1:35749 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-08T19:50:07,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35749 is added to blk_-9223372036854775600_1031 (size=5249) 2024-11-08T19:50:07,225 WARN [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-08T19:50:07,225 INFO [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/hbase/meta/1588230740/.tmp/table/94a7c83d80114403bbef1bba5ca37a30 2024-11-08T19:50:07,256 DEBUG [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/hbase/meta/1588230740/.tmp/info/76aa0ebb88ea473a87c975447b9dd3d9 as hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/hbase/meta/1588230740/info/76aa0ebb88ea473a87c975447b9dd3d9 2024-11-08T19:50:07,272 INFO [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/hbase/meta/1588230740/info/76aa0ebb88ea473a87c975447b9dd3d9, entries=10, sequenceid=11, filesize=6.5 K 2024-11-08T19:50:07,274 DEBUG [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/hbase/meta/1588230740/.tmp/ns/e2d6ee072b26408e8d905ba558de857e as hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/hbase/meta/1588230740/ns/e2d6ee072b26408e8d905ba558de857e 2024-11-08T19:50:07,287 INFO [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/hbase/meta/1588230740/ns/e2d6ee072b26408e8d905ba558de857e, entries=2, sequenceid=11, filesize=5.0 K 2024-11-08T19:50:07,289 DEBUG [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/hbase/meta/1588230740/.tmp/table/94a7c83d80114403bbef1bba5ca37a30 as hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/hbase/meta/1588230740/table/94a7c83d80114403bbef1bba5ca37a30 2024-11-08T19:50:07,302 INFO [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/hbase/meta/1588230740/table/94a7c83d80114403bbef1bba5ca37a30, entries=2, sequenceid=11, filesize=5.1 K 2024-11-08T19:50:07,305 INFO [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 321ms, sequenceid=11, compaction requested=false 2024-11-08T19:50:07,305 DEBUG [RS_CLOSE_META-regionserver/692b295ad45c:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-08T19:50:07,317 DEBUG [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-08T19:50:07,318 DEBUG [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-08T19:50:07,318 INFO [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-08T19:50:07,318 DEBUG [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731095406981Running coprocessor pre-close hooks at 1731095406981Disabling compacts and flushes for region at 1731095406982 (+1 ms)Disabling writes for close at 1731095406983 (+1 ms)Obtaining lock to block concurrent updates at 1731095406983Preparing flush snapshotting stores in 1588230740 at 1731095406983Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731095406986 (+3 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731095406988 (+2 ms)Flushing 1588230740/info: creating writer at 1731095406989 (+1 ms)Flushing 1588230740/info: appending metadata at 1731095407030 (+41 ms)Flushing 1588230740/info: closing flushed file at 1731095407030Flushing 1588230740/ns: creating writer at 1731095407076 (+46 ms)Flushing 1588230740/ns: appending metadata at 1731095407102 (+26 ms)Flushing 1588230740/ns: closing flushed file at 1731095407103 (+1 ms)Flushing 1588230740/table: creating writer at 1731095407156 (+53 ms)Flushing 1588230740/table: appending metadata at 1731095407192 (+36 ms)Flushing 1588230740/table: closing flushed file at 1731095407192Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@26a27715: reopening flushed file at 1731095407253 (+61 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5794f950: reopening flushed file at 1731095407272 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@59c3da3d: reopening flushed file at 1731095407288 (+16 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 321ms, sequenceid=11, compaction requested=false at 1731095407305 (+17 ms)Writing region close event to WAL at 1731095407307 (+2 ms)Running coprocessor post-close hooks at 1731095407318 (+11 ms)Closed at 1731095407318 2024-11-08T19:50:07,318 DEBUG [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-08T19:50:07,381 INFO [RS:2;692b295ad45c:43515 {}] regionserver.HRegionServer(976): stopping server 692b295ad45c,43515,1731095401835; all regions closed. 
2024-11-08T19:50:07,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35749 is added to blk_1073741829_1019 (size=2751) 2024-11-08T19:50:07,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46163 is added to blk_1073741829_1019 (size=2751) 2024-11-08T19:50:07,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44041 is added to blk_1073741829_1019 (size=2751) 2024-11-08T19:50:07,390 DEBUG [RS:2;692b295ad45c:43515 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/oldWALs 2024-11-08T19:50:07,390 INFO [RS:2;692b295ad45c:43515 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 692b295ad45c%2C43515%2C1731095401835.meta:.meta(num 1731095404950) 2024-11-08T19:50:07,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35749 is added to blk_1073741827_1017 (size=1298) 2024-11-08T19:50:07,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46163 is added to blk_1073741827_1017 (size=1298) 2024-11-08T19:50:07,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44041 is added to blk_1073741827_1017 (size=1298) 2024-11-08T19:50:07,399 DEBUG [RS:2;692b295ad45c:43515 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/oldWALs 2024-11-08T19:50:07,399 INFO [RS:2;692b295ad45c:43515 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 692b295ad45c%2C43515%2C1731095401835:(num 1731095404402) 2024-11-08T19:50:07,399 DEBUG [RS:2;692b295ad45c:43515 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T19:50:07,399 INFO [RS:2;692b295ad45c:43515 {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T19:50:07,399 INFO [RS:2;692b295ad45c:43515 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T19:50:07,399 INFO [RS:2;692b295ad45c:43515 {}] hbase.ChoreService(370): Chore service for: regionserver/692b295ad45c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-08T19:50:07,399 INFO [RS:2;692b295ad45c:43515 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T19:50:07,400 INFO [RS:2;692b295ad45c:43515 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43515 2024-11-08T19:50:07,400 INFO [regionserver/692b295ad45c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-08T19:50:07,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43515-0x1011bff89330003, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/692b295ad45c,43515,1731095401835 2024-11-08T19:50:07,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T19:50:07,446 INFO [RS:2;692b295ad45c:43515 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T19:50:07,447 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [692b295ad45c,43515,1731095401835] 2024-11-08T19:50:07,541 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/692b295ad45c,43515,1731095401835 already deleted, retry=false 2024-11-08T19:50:07,541 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 692b295ad45c,43515,1731095401835 expired; onlineServers=0 2024-11-08T19:50:07,541 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '692b295ad45c,45119,1731095400739' ***** 2024-11-08T19:50:07,541 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-08T19:50:07,541 INFO [M:0;692b295ad45c:45119 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T19:50:07,542 INFO [M:0;692b295ad45c:45119 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T19:50:07,542 DEBUG [M:0;692b295ad45c:45119 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-08T19:50:07,542 DEBUG [M:0;692b295ad45c:45119 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-08T19:50:07,542 DEBUG [master/692b295ad45c:0:becomeActiveMaster-HFileCleaner.large.0-1731095403781 {}] cleaner.HFileCleaner(306): Exit Thread[master/692b295ad45c:0:becomeActiveMaster-HFileCleaner.large.0-1731095403781,5,FailOnTimeoutGroup] 2024-11-08T19:50:07,542 DEBUG [master/692b295ad45c:0:becomeActiveMaster-HFileCleaner.small.0-1731095403796 {}] cleaner.HFileCleaner(306): Exit Thread[master/692b295ad45c:0:becomeActiveMaster-HFileCleaner.small.0-1731095403796,5,FailOnTimeoutGroup] 2024-11-08T19:50:07,542 INFO [M:0;692b295ad45c:45119 {}] hbase.ChoreService(370): Chore service for: master/692b295ad45c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-08T19:50:07,542 INFO [M:0;692b295ad45c:45119 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T19:50:07,542 DEBUG [M:0;692b295ad45c:45119 {}] master.HMaster(1795): Stopping service threads 2024-11-08T19:50:07,542 INFO [M:0;692b295ad45c:45119 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-08T19:50:07,542 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-08T19:50:07,543 INFO [M:0;692b295ad45c:45119 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-08T19:50:07,543 INFO [M:0;692b295ad45c:45119 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-08T19:50:07,543 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-08T19:50:07,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-08T19:50:07,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:07,552 DEBUG [M:0;692b295ad45c:45119 {}] zookeeper.ZKUtil(347): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-08T19:50:07,552 WARN [M:0;692b295ad45c:45119 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-08T19:50:07,553 INFO [M:0;692b295ad45c:45119 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/.lastflushedseqids 2024-11-08T19:50:07,563 WARN [M:0;692b295ad45c:45119 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T19:50:07,564 WARN [M:0;692b295ad45c:45119 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T19:50:07,574 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1254849551_22 at /127.0.0.1:50316 [Receiving block BP-947710928-172.17.0.2-1731095395196:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:46163:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50316 dst: /127.0.0.1:46163 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-08T19:50:07,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46163 is added to blk_-9223372036854775584_1033 (size=127) 2024-11-08T19:50:07,585 WARN [M:0;692b295ad45c:45119 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-08T19:50:07,586 INFO [M:0;692b295ad45c:45119 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-08T19:50:07,586 INFO [M:0;692b295ad45c:45119 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-08T19:50:07,586 DEBUG [M:0;692b295ad45c:45119 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-08T19:50:07,586 INFO [M:0;692b295ad45c:45119 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T19:50:07,586 DEBUG [M:0;692b295ad45c:45119 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T19:50:07,587 DEBUG [M:0;692b295ad45c:45119 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-08T19:50:07,587 DEBUG [M:0;692b295ad45c:45119 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T19:50:07,587 INFO [M:0;692b295ad45c:45119 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.83 KB heapSize=34.12 KB 2024-11-08T19:50:07,620 DEBUG [M:0;692b295ad45c:45119 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/759bfde114344171a383990d2d66994e is 82, key is hbase:meta,,1/info:regioninfo/1731095405150/Put/seqid=0 2024-11-08T19:50:07,626 WARN [M:0;692b295ad45c:45119 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T19:50:07,626 WARN [M:0;692b295ad45c:45119 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T19:50:07,631 INFO [RS:2;692b295ad45c:43515 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T19:50:07,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43515-0x1011bff89330003, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T19:50:07,631 INFO [RS:2;692b295ad45c:43515 {}] regionserver.HRegionServer(1031): Exiting; stopping=692b295ad45c,43515,1731095401835; zookeeper connection closed. 
2024-11-08T19:50:07,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43515-0x1011bff89330003, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T19:50:07,636 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@f99a20c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@f99a20c 2024-11-08T19:50:07,637 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-08T19:50:07,641 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1254849551_22 at /127.0.0.1:41718 [Receiving block BP-947710928-172.17.0.2-1731095395196:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:44041:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41718 dst: /127.0.0.1:44041 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T19:50:07,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44041 is added to blk_-9223372036854775568_1035 (size=5672) 2024-11-08T19:50:08,051 WARN [M:0;692b295ad45c:45119 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-08T19:50:08,051 INFO [M:0;692b295ad45c:45119 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/759bfde114344171a383990d2d66994e 2024-11-08T19:50:08,086 DEBUG [M:0;692b295ad45c:45119 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c4b647c46ab743e7a188362c2ec20155 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731095406215/Put/seqid=0 2024-11-08T19:50:08,089 WARN [M:0;692b295ad45c:45119 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T19:50:08,089 WARN [M:0;692b295ad45c:45119 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T19:50:08,093 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1254849551_22 at /127.0.0.1:50338 [Receiving block BP-947710928-172.17.0.2-1731095395196:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:46163:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50338 dst: /127.0.0.1:46163 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T19:50:08,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46163 is added to blk_-9223372036854775552_1037 (size=6439) 2024-11-08T19:50:08,100 WARN [M:0;692b295ad45c:45119 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-08T19:50:08,101 INFO [M:0;692b295ad45c:45119 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c4b647c46ab743e7a188362c2ec20155 2024-11-08T19:50:08,137 DEBUG [M:0;692b295ad45c:45119 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/09c6ca0dbc3c4db6829f3795a70ab2cf is 69, key is 692b295ad45c,33115,1731095401770/rs:state/1731095403858/Put/seqid=0 2024-11-08T19:50:08,139 WARN [M:0;692b295ad45c:45119 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-08T19:50:08,139 WARN [M:0;692b295ad45c:45119 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T19:50:08,253 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1254849551_22 at /127.0.0.1:50356 [Receiving block BP-947710928-172.17.0.2-1731095395196:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:46163:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50356 dst: /127.0.0.1:46163 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T19:50:08,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46163 is added to blk_-9223372036854775536_1039 (size=5294) 2024-11-08T19:50:08,264 WARN [M:0;692b295ad45c:45119 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
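The repeated DFSStripedOutputStream warnings above are a capacity problem rather than data corruption: the RS-3-2-1024k policy stripes each block group into 3 data blocks plus 2 parity blocks, so full placement needs at least 5 datanodes, while this mini cluster runs only 3. The parity cells at indices 3 and 4 therefore cannot be allocated, the per-block writers shut down early (the likely source of the "Premature EOF" DataXceiver errors), and the flush still completes because the 3 data blocks are written. Besides the 'hdfs ec -verifyClusterSetup' command quoted in the warning, the same check can be approximated in code; the sketch below is illustrative only (the class name EcCapacityCheck is made up) and compares a path's policy against the datanode count reported by the NameNode.

```java
import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

// Illustrative check (not part of the test): does the cluster have enough
// datanodes for the erasure coding policy applied to a directory?
public final class EcCapacityCheck {
  public static void check(DistributedFileSystem dfs, Path dir) throws IOException {
    ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
    if (policy == null) {
      System.out.println(dir + " uses plain replication, nothing to verify");
      return;
    }
    // RS-3-2-1024k => 3 data units + 2 parity units = 5 datanodes required.
    int required = policy.getNumDataUnits() + policy.getNumParityUnits();
    int reported = dfs.getDataNodeStats().length; // datanodes reported by the NameNode
    System.out.printf("%s: policy %s needs %d datanodes, cluster reports %d%n",
        dir, policy.getName(), required, reported);
    // 'hdfs ec -verifyClusterSetup' (quoted in the warning above) remains the
    // authoritative check across all enabled policies.
  }
}
```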
2024-11-08T19:50:08,264 INFO [M:0;692b295ad45c:45119 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/09c6ca0dbc3c4db6829f3795a70ab2cf 2024-11-08T19:50:08,274 DEBUG [M:0;692b295ad45c:45119 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/759bfde114344171a383990d2d66994e as hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/759bfde114344171a383990d2d66994e 2024-11-08T19:50:08,284 INFO [M:0;692b295ad45c:45119 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/759bfde114344171a383990d2d66994e, entries=8, sequenceid=72, filesize=5.5 K 2024-11-08T19:50:08,286 DEBUG [M:0;692b295ad45c:45119 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c4b647c46ab743e7a188362c2ec20155 as hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c4b647c46ab743e7a188362c2ec20155 2024-11-08T19:50:08,297 INFO [M:0;692b295ad45c:45119 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c4b647c46ab743e7a188362c2ec20155, entries=8, sequenceid=72, filesize=6.3 K 2024-11-08T19:50:08,298 DEBUG [M:0;692b295ad45c:45119 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/09c6ca0dbc3c4db6829f3795a70ab2cf as hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/09c6ca0dbc3c4db6829f3795a70ab2cf 2024-11-08T19:50:08,307 INFO [M:0;692b295ad45c:45119 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/09c6ca0dbc3c4db6829f3795a70ab2cf, entries=3, sequenceid=72, filesize=5.2 K 2024-11-08T19:50:08,309 INFO [M:0;692b295ad45c:45119 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 722ms, sequenceid=72, compaction requested=false 2024-11-08T19:50:08,310 INFO [M:0;692b295ad45c:45119 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
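The flush above also shows the usual write-to-temp-then-commit pattern: DefaultStoreFlusher writes each store file under .tmp/, HRegionFileSystem then logs "Committing .tmp/... as ..." while moving it into the column family directory (info/, proc/, rs/), and only then does HStore report the file as added with its entry count and size. The sketch below is a generic illustration of that pattern using the plain Hadoop FileSystem API; it is not HBase's actual commit path, which adds validation and bookkeeping omitted here.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Generic ".tmp then commit" sketch mirroring the Committing/Added lines above.
// Names (TmpCommitSketch, writeAndCommit) are hypothetical.
public final class TmpCommitSketch {
  public static Path writeAndCommit(Configuration conf, Path storeDir, String fileName,
      byte[] payload) throws IOException {
    FileSystem fs = storeDir.getFileSystem(conf);
    Path tmp = new Path(new Path(storeDir, ".tmp"), fileName);
    Path dst = new Path(storeDir, fileName);
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(payload); // data lands under .tmp/, not yet visible to readers of storeDir
    }
    // Publish in one step, like the "Committing ... as ..." move in the log.
    if (!fs.rename(tmp, dst)) {
      throw new IOException("Failed to commit " + tmp + " to " + dst);
    }
    return dst;
  }
}
```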
2024-11-08T19:50:08,311 DEBUG [M:0;692b295ad45c:45119 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731095407586Disabling compacts and flushes for region at 1731095407586Disabling writes for close at 1731095407587 (+1 ms)Obtaining lock to block concurrent updates at 1731095407587Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731095407587Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27471, getHeapSize=34872, getOffHeapSize=0, getCellsCount=85 at 1731095407588 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731095407593 (+5 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731095407593Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731095407619 (+26 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731095407619Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731095408063 (+444 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731095408085 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731095408085Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731095408113 (+28 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731095408137 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731095408137Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@22cfa78d: reopening flushed file at 1731095408272 (+135 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@365e1ce0: reopening flushed file at 1731095408284 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3982b51: reopening flushed file at 1731095408297 (+13 ms)Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 722ms, sequenceid=72, compaction requested=false at 1731095408309 (+12 ms)Writing region close event to WAL at 1731095408310 (+1 ms)Closed at 1731095408310 2024-11-08T19:50:08,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44041 is added to blk_1073741825_1011 (size=32674) 2024-11-08T19:50:08,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46163 is added to blk_1073741825_1011 (size=32674) 2024-11-08T19:50:08,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35749 is added to blk_1073741825_1011 (size=32674) 2024-11-08T19:50:08,315 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-08T19:50:08,315 INFO [M:0;692b295ad45c:45119 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-08T19:50:08,315 INFO [M:0;692b295ad45c:45119 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45119 2024-11-08T19:50:08,316 INFO [M:0;692b295ad45c:45119 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T19:50:08,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35749 is added to blk_-9223372036854775708_1013 (size=1321) 2024-11-08T19:50:08,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46163 is added to blk_-9223372036854775709_1013 (size=1321) 2024-11-08T19:50:08,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44041 is added to blk_-9223372036854775693_1015 (size=32) 2024-11-08T19:50:08,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35749 is added to blk_-9223372036854775692_1015 (size=32) 2024-11-08T19:50:08,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44041 is added to blk_-9223372036854775676_1021 (size=392) 2024-11-08T19:50:08,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35749 is added to blk_-9223372036854775677_1021 (size=392) 2024-11-08T19:50:08,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35749 is added to blk_-9223372036854775661_1023 (size=51) 2024-11-08T19:50:08,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44041 is added to blk_-9223372036854775660_1023 (size=51) 2024-11-08T19:50:08,622 INFO [M:0;692b295ad45c:45119 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T19:50:08,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T19:50:08,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45119-0x1011bff89330000, quorum=127.0.0.1:54537, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T19:50:08,685 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3114ae69{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T19:50:08,688 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3c70a874{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T19:50:08,688 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T19:50:08,689 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5822645a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T19:50:08,689 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16cd567f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/hadoop.log.dir/,STOPPED} 2024-11-08T19:50:08,693 WARN [BP-947710928-172.17.0.2-1731095395196 heartbeating to 
localhost/127.0.0.1:34095 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T19:50:08,693 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-08T19:50:08,693 WARN [BP-947710928-172.17.0.2-1731095395196 heartbeating to localhost/127.0.0.1:34095 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-947710928-172.17.0.2-1731095395196 (Datanode Uuid 06f1f6c0-85e9-48f7-8cc1-69dc60077454) service to localhost/127.0.0.1:34095 2024-11-08T19:50:08,693 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T19:50:08,694 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/cluster_016ada73-ccc6-e170-d67f-7b5f9a0dfd5d/data/data5/current/BP-947710928-172.17.0.2-1731095395196 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T19:50:08,694 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/cluster_016ada73-ccc6-e170-d67f-7b5f9a0dfd5d/data/data6/current/BP-947710928-172.17.0.2-1731095395196 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T19:50:08,695 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T19:50:08,697 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@353955e9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T19:50:08,697 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11738cd8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T19:50:08,697 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T19:50:08,698 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40eb7053{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T19:50:08,698 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@510fec09{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/hadoop.log.dir/,STOPPED} 2024-11-08T19:50:08,700 WARN [BP-947710928-172.17.0.2-1731095395196 heartbeating to localhost/127.0.0.1:34095 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T19:50:08,700 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-08T19:50:08,700 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T19:50:08,700 WARN [BP-947710928-172.17.0.2-1731095395196 heartbeating to localhost/127.0.0.1:34095 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-947710928-172.17.0.2-1731095395196 (Datanode Uuid 3f3cb7cd-3701-4eda-b085-bd31987c54bb) service to localhost/127.0.0.1:34095 2024-11-08T19:50:08,700 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/cluster_016ada73-ccc6-e170-d67f-7b5f9a0dfd5d/data/data3/current/BP-947710928-172.17.0.2-1731095395196 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T19:50:08,701 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/cluster_016ada73-ccc6-e170-d67f-7b5f9a0dfd5d/data/data4/current/BP-947710928-172.17.0.2-1731095395196 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T19:50:08,701 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T19:50:08,703 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1b97a472{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T19:50:08,703 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3722a29b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T19:50:08,703 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T19:50:08,703 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69893329{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T19:50:08,704 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a5de9e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/hadoop.log.dir/,STOPPED} 2024-11-08T19:50:08,706 WARN [BP-947710928-172.17.0.2-1731095395196 heartbeating to localhost/127.0.0.1:34095 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T19:50:08,706 WARN [BP-947710928-172.17.0.2-1731095395196 heartbeating to localhost/127.0.0.1:34095 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-947710928-172.17.0.2-1731095395196 (Datanode Uuid 3a5dc330-69cc-4643-b6df-4f5f1fc9b36c) service to localhost/127.0.0.1:34095 2024-11-08T19:50:08,706 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/cluster_016ada73-ccc6-e170-d67f-7b5f9a0dfd5d/data/data1/current/BP-947710928-172.17.0.2-1731095395196 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 
2024-11-08T19:50:08,707 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/cluster_016ada73-ccc6-e170-d67f-7b5f9a0dfd5d/data/data2/current/BP-947710928-172.17.0.2-1731095395196 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T19:50:08,707 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-08T19:50:08,707 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T19:50:08,707 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T19:50:08,720 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62d6efd9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-08T19:50:08,721 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@353d35a1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T19:50:08,721 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T19:50:08,721 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ce709a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T19:50:08,722 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@760c69c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/hadoop.log.dir/,STOPPED} 2024-11-08T19:50:08,731 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-08T19:50:08,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-08T19:50:08,782 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=89 (was 162), OpenFileDescriptor=443 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=207 (was 109) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=2919 (was 3623) 2024-11-08T19:50:08,790 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=89, OpenFileDescriptor=443, MaxFileDescriptor=1048576, SystemLoadAverage=207, ProcessCount=11, AvailableMemoryMB=2919 2024-11-08T19:50:08,790 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-08T19:50:08,791 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/hadoop.log.dir so I do NOT create it in target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2 2024-11-08T19:50:08,791 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/851efe14-8265-645e-6804-ce7b460be40a/hadoop.tmp.dir so I do NOT create it in target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2 2024-11-08T19:50:08,791 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/cluster_42cdef85-3bd7-8176-735f-90bff14955ac, deleteOnExit=true 2024-11-08T19:50:08,791 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-08T19:50:08,791 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/test.cache.data in system properties and HBase conf 2024-11-08T19:50:08,791 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/hadoop.tmp.dir in system properties and HBase conf 2024-11-08T19:50:08,791 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/hadoop.log.dir in system properties and HBase conf 2024-11-08T19:50:08,791 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-08T19:50:08,791 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-08T19:50:08,791 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-08T19:50:08,792 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-08T19:50:08,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-08T19:50:08,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-08T19:50:08,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-08T19:50:08,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-08T19:50:08,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-08T19:50:08,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-08T19:50:08,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-08T19:50:08,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-08T19:50:08,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-08T19:50:08,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/nfs.dump.dir in system properties and HBase conf 2024-11-08T19:50:08,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/java.io.tmpdir in system properties and HBase conf 2024-11-08T19:50:08,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-08T19:50:08,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-08T19:50:08,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-08T19:50:09,182 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T19:50:09,189 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T19:50:09,192 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T19:50:09,192 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T19:50:09,192 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-08T19:50:09,193 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T19:50:09,193 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59bbe271{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/hadoop.log.dir/,AVAILABLE} 2024-11-08T19:50:09,194 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77ad49ec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T19:50:09,325 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@75925886{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/java.io.tmpdir/jetty-localhost-41679-hadoop-hdfs-3_4_1-tests_jar-_-any-9069020173494068361/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-08T19:50:09,326 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6f1e7dec{HTTP/1.1, (http/1.1)}{localhost:41679} 2024-11-08T19:50:09,326 INFO [Time-limited test {}] server.Server(415): Started @16944ms 2024-11-08T19:50:09,608 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T19:50:09,617 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T19:50:09,621 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T19:50:09,621 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T19:50:09,621 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T19:50:09,622 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46f2e60d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/hadoop.log.dir/,AVAILABLE} 2024-11-08T19:50:09,622 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@436188c4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T19:50:09,739 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6419fd60{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/java.io.tmpdir/jetty-localhost-33043-hadoop-hdfs-3_4_1-tests_jar-_-any-13271803226673865746/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T19:50:09,740 
INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@356b0e7e{HTTP/1.1, (http/1.1)}{localhost:33043} 2024-11-08T19:50:09,740 INFO [Time-limited test {}] server.Server(415): Started @17358ms 2024-11-08T19:50:09,742 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T19:50:09,782 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T19:50:09,787 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T19:50:09,788 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T19:50:09,788 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T19:50:09,788 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-08T19:50:09,789 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f5c60f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/hadoop.log.dir/,AVAILABLE} 2024-11-08T19:50:09,790 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6ba59100{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T19:50:09,907 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3c49e2f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/java.io.tmpdir/jetty-localhost-40565-hadoop-hdfs-3_4_1-tests_jar-_-any-6441107614269250492/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T19:50:09,907 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@74a88c50{HTTP/1.1, (http/1.1)}{localhost:40565} 2024-11-08T19:50:09,907 INFO [Time-limited test {}] server.Server(415): Started @17526ms 2024-11-08T19:50:09,909 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T19:50:09,955 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T19:50:09,961 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T19:50:09,962 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T19:50:09,962 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T19:50:09,962 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-08T19:50:09,963 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60d8940e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/hadoop.log.dir/,AVAILABLE} 2024-11-08T19:50:09,963 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10a2b6be{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T19:50:10,060 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T19:50:10,060 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T19:50:10,061 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T19:50:10,104 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@165796ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/java.io.tmpdir/jetty-localhost-39751-hadoop-hdfs-3_4_1-tests_jar-_-any-10581930381837213263/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T19:50:10,105 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@176751e3{HTTP/1.1, (http/1.1)}{localhost:39751} 2024-11-08T19:50:10,105 INFO [Time-limited test {}] server.Server(415): Started @17723ms 2024-11-08T19:50:10,107 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T19:50:10,564 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-08T19:50:10,574 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T19:50:10,625 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T19:50:10,626 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T19:50:11,197 WARN [Thread-589 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/cluster_42cdef85-3bd7-8176-735f-90bff14955ac/data/data2/current/BP-161215812-172.17.0.2-1731095408818/current, will proceed with Du for space computation calculation, 2024-11-08T19:50:11,197 WARN [Thread-588 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/cluster_42cdef85-3bd7-8176-735f-90bff14955ac/data/data1/current/BP-161215812-172.17.0.2-1731095408818/current, will proceed with Du for space computation calculation, 2024-11-08T19:50:11,218 WARN [Thread-528 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-08T19:50:11,221 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x41c8bbd47a0efb07 with lease ID 0x438c2e1976c9debf: Processing first storage report for DS-8a54ea70-ac1b-43be-b375-eff9ed620bd9 from datanode DatanodeRegistration(127.0.0.1:37483, datanodeUuid=192c6f89-a32b-4095-aa3a-62101146b2ea, infoPort=41401, infoSecurePort=0, ipcPort=33175, storageInfo=lv=-57;cid=testClusterID;nsid=364326427;c=1731095408818) 2024-11-08T19:50:11,221 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x41c8bbd47a0efb07 with lease ID 0x438c2e1976c9debf: from storage DS-8a54ea70-ac1b-43be-b375-eff9ed620bd9 node DatanodeRegistration(127.0.0.1:37483, datanodeUuid=192c6f89-a32b-4095-aa3a-62101146b2ea, infoPort=41401, infoSecurePort=0, ipcPort=33175, storageInfo=lv=-57;cid=testClusterID;nsid=364326427;c=1731095408818), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T19:50:11,221 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x41c8bbd47a0efb07 with lease ID 0x438c2e1976c9debf: Processing first storage report for DS-d6f93141-5cc6-4e39-9da4-32892597c9cf from datanode DatanodeRegistration(127.0.0.1:37483, datanodeUuid=192c6f89-a32b-4095-aa3a-62101146b2ea, infoPort=41401, infoSecurePort=0, ipcPort=33175, storageInfo=lv=-57;cid=testClusterID;nsid=364326427;c=1731095408818) 2024-11-08T19:50:11,221 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x41c8bbd47a0efb07 with lease ID 0x438c2e1976c9debf: from storage DS-d6f93141-5cc6-4e39-9da4-32892597c9cf node DatanodeRegistration(127.0.0.1:37483, datanodeUuid=192c6f89-a32b-4095-aa3a-62101146b2ea, infoPort=41401, infoSecurePort=0, ipcPort=33175, storageInfo=lv=-57;cid=testClusterID;nsid=364326427;c=1731095408818), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T19:50:11,486 WARN [Thread-600 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/cluster_42cdef85-3bd7-8176-735f-90bff14955ac/data/data3/current/BP-161215812-172.17.0.2-1731095408818/current, will proceed with Du for space computation calculation, 2024-11-08T19:50:11,486 WARN [Thread-601 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/cluster_42cdef85-3bd7-8176-735f-90bff14955ac/data/data4/current/BP-161215812-172.17.0.2-1731095408818/current, will proceed with Du for space computation calculation, 2024-11-08T19:50:11,506 WARN [Thread-551 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-08T19:50:11,509 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2f7e8d4e3d3c26a9 with lease ID 0x438c2e1976c9dec0: Processing first storage report for DS-864a86b8-2138-4c79-be60-42c14812d1c1 from datanode DatanodeRegistration(127.0.0.1:42907, datanodeUuid=92614bba-ef2d-4b1d-a465-2ae644e7fb31, infoPort=43819, infoSecurePort=0, ipcPort=42709, storageInfo=lv=-57;cid=testClusterID;nsid=364326427;c=1731095408818) 2024-11-08T19:50:11,510 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2f7e8d4e3d3c26a9 with lease ID 0x438c2e1976c9dec0: from storage DS-864a86b8-2138-4c79-be60-42c14812d1c1 node DatanodeRegistration(127.0.0.1:42907, datanodeUuid=92614bba-ef2d-4b1d-a465-2ae644e7fb31, infoPort=43819, infoSecurePort=0, ipcPort=42709, storageInfo=lv=-57;cid=testClusterID;nsid=364326427;c=1731095408818), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T19:50:11,510 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2f7e8d4e3d3c26a9 with lease ID 0x438c2e1976c9dec0: Processing first storage report for DS-0c169481-1644-4491-9bfc-6ded87cc4de2 from datanode DatanodeRegistration(127.0.0.1:42907, datanodeUuid=92614bba-ef2d-4b1d-a465-2ae644e7fb31, infoPort=43819, infoSecurePort=0, ipcPort=42709, storageInfo=lv=-57;cid=testClusterID;nsid=364326427;c=1731095408818) 2024-11-08T19:50:11,510 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2f7e8d4e3d3c26a9 with lease ID 0x438c2e1976c9dec0: from storage DS-0c169481-1644-4491-9bfc-6ded87cc4de2 node DatanodeRegistration(127.0.0.1:42907, datanodeUuid=92614bba-ef2d-4b1d-a465-2ae644e7fb31, infoPort=43819, infoSecurePort=0, ipcPort=42709, storageInfo=lv=-57;cid=testClusterID;nsid=364326427;c=1731095408818), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T19:50:11,604 WARN [Thread-611 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/cluster_42cdef85-3bd7-8176-735f-90bff14955ac/data/data5/current/BP-161215812-172.17.0.2-1731095408818/current, will proceed with Du for space computation calculation, 2024-11-08T19:50:11,604 WARN [Thread-612 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/cluster_42cdef85-3bd7-8176-735f-90bff14955ac/data/data6/current/BP-161215812-172.17.0.2-1731095408818/current, will proceed with Du for space computation calculation, 2024-11-08T19:50:11,619 WARN [Thread-573 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-08T19:50:11,622 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x96e0e9e2640fa565 with lease ID 0x438c2e1976c9dec1: Processing first storage report for DS-a084126a-b70a-436f-bb93-41ccbb6f179d from datanode DatanodeRegistration(127.0.0.1:45381, datanodeUuid=f730e582-a07e-4f3e-8b19-2ca87460b617, infoPort=45311, infoSecurePort=0, ipcPort=41119, storageInfo=lv=-57;cid=testClusterID;nsid=364326427;c=1731095408818) 2024-11-08T19:50:11,622 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x96e0e9e2640fa565 with lease ID 0x438c2e1976c9dec1: from storage DS-a084126a-b70a-436f-bb93-41ccbb6f179d node DatanodeRegistration(127.0.0.1:45381, datanodeUuid=f730e582-a07e-4f3e-8b19-2ca87460b617, infoPort=45311, infoSecurePort=0, ipcPort=41119, storageInfo=lv=-57;cid=testClusterID;nsid=364326427;c=1731095408818), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T19:50:11,622 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x96e0e9e2640fa565 with lease ID 0x438c2e1976c9dec1: Processing first storage report for DS-31f2fb4f-5dc6-4396-baa2-fea470eb8c69 from datanode DatanodeRegistration(127.0.0.1:45381, datanodeUuid=f730e582-a07e-4f3e-8b19-2ca87460b617, infoPort=45311, infoSecurePort=0, ipcPort=41119, storageInfo=lv=-57;cid=testClusterID;nsid=364326427;c=1731095408818) 2024-11-08T19:50:11,622 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x96e0e9e2640fa565 with lease ID 0x438c2e1976c9dec1: from storage DS-31f2fb4f-5dc6-4396-baa2-fea470eb8c69 node DatanodeRegistration(127.0.0.1:45381, datanodeUuid=f730e582-a07e-4f3e-8b19-2ca87460b617, infoPort=45311, infoSecurePort=0, ipcPort=41119, storageInfo=lv=-57;cid=testClusterID;nsid=364326427;c=1731095408818), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T19:50:11,666 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2 2024-11-08T19:50:11,670 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/cluster_42cdef85-3bd7-8176-735f-90bff14955ac/zookeeper_0, clientPort=50222, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/cluster_42cdef85-3bd7-8176-735f-90bff14955ac/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/cluster_42cdef85-3bd7-8176-735f-90bff14955ac/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-08T19:50:11,671 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50222 2024-11-08T19:50:11,672 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
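At this point the harness has finished tearing down the first cluster and is rebuilding everything for testReadWrite[1]: a 3-datanode DFS, a single-node MiniZooKeeperCluster on client port 50222, and (below) the master plus three region servers requested by StartMiniClusterOption{numMasters=1, numRegionServers=3, numDataNodes=3}. The sketch below shows how a test typically asks for that topology; it assumes the branch-3 HBaseTestingUtil keeps the StartMiniClusterOption builder and the startMiniCluster(option) overload printed in the log, so treat the exact method names as unverified.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

// Hedged sketch of bringing up the topology reported in the log:
// 1 master, 3 region servers, 3 datanodes, 1 ZooKeeper server.
public final class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(3)
        .numDataNodes(3)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option); // starts DFS, ZooKeeper, master and region servers
    try {
      // ... exercise the cluster through util.getConnection() here ...
    } finally {
      util.shutdownMiniCluster(); // produces the "Minicluster is down" line seen earlier
    }
  }
}
```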
2024-11-08T19:50:11,674 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T19:50:11,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45381 is added to blk_1073741825_1001 (size=7) 2024-11-08T19:50:11,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37483 is added to blk_1073741825_1001 (size=7) 2024-11-08T19:50:11,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42907 is added to blk_1073741825_1001 (size=7) 2024-11-08T19:50:11,692 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1 with version=8 2024-11-08T19:50:11,693 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34095/user/jenkins/test-data/77816e69-4018-94c8-e55c-84abaf702c3a/hbase-staging 2024-11-08T19:50:11,695 INFO [Time-limited test {}] client.ConnectionUtils(128): master/692b295ad45c:0 server-side Connection retries=45 2024-11-08T19:50:11,696 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T19:50:11,696 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T19:50:11,696 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T19:50:11,696 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T19:50:11,696 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T19:50:11,696 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-08T19:50:11,696 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T19:50:11,697 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36033 2024-11-08T19:50:11,699 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36033 connecting to ZooKeeper ensemble=127.0.0.1:50222 2024-11-08T19:50:11,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:360330x0, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T19:50:11,787 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36033-0x1011bffb7800000 connected 2024-11-08T19:50:11,869 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to 
namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T19:50:11,871 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T19:50:11,875 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T19:50:11,875 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1, hbase.cluster.distributed=false 2024-11-08T19:50:11,878 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T19:50:11,879 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36033 2024-11-08T19:50:11,879 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36033 2024-11-08T19:50:11,880 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36033 2024-11-08T19:50:11,884 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36033 2024-11-08T19:50:11,885 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36033 2024-11-08T19:50:11,908 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/692b295ad45c:0 server-side Connection retries=45 2024-11-08T19:50:11,908 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T19:50:11,908 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T19:50:11,908 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T19:50:11,908 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T19:50:11,908 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T19:50:11,908 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-08T19:50:11,909 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T19:50:11,909 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41507 
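The NettyRpcServer lines around here show each server sizing its call queues at construction time: a default FPBQ fifo queue with 3 handlers, a priority read/write pair (1 write, 2 read handlers), plus replication and meta-priority queues, all capped at maxQueueLength=30. Those numbers come from configuration the mini cluster keeps deliberately small; the sketch below names the standard keys an operator would tune for the same knobs, and the specific values (3 handlers, queue length 30, read ratio) are assumptions matching this run rather than recommended settings.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hedged sketch of the standard RPC sizing keys behind the RpcExecutor lines above.
// Values mirror this test run and are not tuning advice.
public final class RpcQueueConfigSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.regionserver.handler.count", 3);       // handlerCount=3 in the log
    conf.setInt("hbase.ipc.server.max.callqueue.length", 30); // maxQueueLength=30 in the log
    // Read/write split of the call queue; the 1 write / 2 read handlers above
    // are assumed to follow from a ratio like this.
    conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.66f);
    return conf;
  }
}
```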
2024-11-08T19:50:11,911 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41507 connecting to ZooKeeper ensemble=127.0.0.1:50222 2024-11-08T19:50:11,912 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T19:50:11,914 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T19:50:11,974 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:415070x0, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T19:50:11,975 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41507-0x1011bffb7800001 connected 2024-11-08T19:50:11,975 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41507-0x1011bffb7800001, quorum=127.0.0.1:50222, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T19:50:11,976 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-08T19:50:11,976 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-08T19:50:11,977 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41507-0x1011bffb7800001, quorum=127.0.0.1:50222, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-08T19:50:11,979 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41507-0x1011bffb7800001, quorum=127.0.0.1:50222, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T19:50:11,979 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41507 2024-11-08T19:50:11,980 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41507 2024-11-08T19:50:11,980 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41507 2024-11-08T19:50:11,983 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41507 2024-11-08T19:50:11,984 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41507 2024-11-08T19:50:11,999 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/692b295ad45c:0 server-side Connection retries=45 2024-11-08T19:50:11,999 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T19:50:11,999 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T19:50:11,999 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T19:50:11,999 INFO 
[Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T19:50:11,999 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T19:50:11,999 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-08T19:50:11,999 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T19:50:12,000 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46325 2024-11-08T19:50:12,002 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46325 connecting to ZooKeeper ensemble=127.0.0.1:50222 2024-11-08T19:50:12,003 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T19:50:12,005 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T19:50:12,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:463250x0, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T19:50:12,033 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:463250x0, quorum=127.0.0.1:50222, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T19:50:12,033 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46325-0x1011bffb7800002 connected 2024-11-08T19:50:12,033 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-08T19:50:12,034 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-08T19:50:12,035 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46325-0x1011bffb7800002, quorum=127.0.0.1:50222, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-08T19:50:12,036 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46325-0x1011bffb7800002, quorum=127.0.0.1:50222, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T19:50:12,037 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46325 2024-11-08T19:50:12,037 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46325 2024-11-08T19:50:12,037 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46325 2024-11-08T19:50:12,038 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46325 2024-11-08T19:50:12,038 DEBUG [Time-limited test 
{}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46325 2024-11-08T19:50:12,054 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/692b295ad45c:0 server-side Connection retries=45 2024-11-08T19:50:12,054 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T19:50:12,054 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T19:50:12,054 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T19:50:12,054 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T19:50:12,054 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T19:50:12,054 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-08T19:50:12,054 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T19:50:12,055 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41773 2024-11-08T19:50:12,057 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41773 connecting to ZooKeeper ensemble=127.0.0.1:50222 2024-11-08T19:50:12,057 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T19:50:12,059 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T19:50:12,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:417730x0, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T19:50:12,069 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:417730x0, quorum=127.0.0.1:50222, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T19:50:12,069 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-08T19:50:12,070 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41773-0x1011bffb7800003 connected 2024-11-08T19:50:12,070 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-08T19:50:12,071 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41773-0x1011bffb7800003, quorum=127.0.0.1:50222, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 
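[annotation] Each region server above allocates a BlockCache ("size=880 MB, blockSize=64 KB"). A minimal sketch of the underlying idea, a byte-capped LRU cache, is shown below; this is not HBase's LruBlockCache, it only illustrates size-based eviction, and it evicts at most one block per insert, which is good enough for a sketch.

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Byte-capped LRU block cache sketch in the spirit of
    // "Allocating BlockCache size=880 MB, blockSize=64 KB" above.
    public class LruBlockCacheSketch {
        private final long maxBytes;
        private long currentBytes = 0;
        private final LinkedHashMap<String, byte[]> blocks =
            new LinkedHashMap<String, byte[]>(16, 0.75f, true) {   // access-order = LRU
                @Override
                protected boolean removeEldestEntry(Map.Entry<String, byte[]> eldest) {
                    if (currentBytes > maxBytes) {
                        currentBytes -= eldest.getValue().length;
                        return true;                               // evict least-recently-used block
                    }
                    return false;
                }
            };

        public LruBlockCacheSketch(long maxBytes) { this.maxBytes = maxBytes; }

        public void cacheBlock(String key, byte[] block) {
            currentBytes += block.length;
            blocks.put(key, block);                                // may trigger eviction of the eldest entry
        }

        public byte[] getBlock(String key) { return blocks.get(key); }

        public static void main(String[] args) {
            LruBlockCacheSketch cache = new LruBlockCacheSketch(880L * 1024 * 1024);
            cache.cacheBlock("hfile-block-0", new byte[64 * 1024]);  // one 64 KB block
            System.out.println(cache.getBlock("hfile-block-0").length);
        }
    }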
2024-11-08T19:50:12,072 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41773-0x1011bffb7800003, quorum=127.0.0.1:50222, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T19:50:12,073 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41773 2024-11-08T19:50:12,076 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41773 2024-11-08T19:50:12,080 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41773 2024-11-08T19:50:12,088 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41773 2024-11-08T19:50:12,089 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41773 2024-11-08T19:50:12,103 DEBUG [M:0;692b295ad45c:36033 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;692b295ad45c:36033 2024-11-08T19:50:12,103 INFO [master/692b295ad45c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/692b295ad45c,36033,1731095411695 2024-11-08T19:50:12,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46325-0x1011bffb7800002, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T19:50:12,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T19:50:12,118 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/692b295ad45c,36033,1731095411695 2024-11-08T19:50:12,120 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1011bffb7800001, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T19:50:12,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41773-0x1011bffb7800003, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T19:50:12,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41773-0x1011bffb7800003, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-08T19:50:12,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46325-0x1011bffb7800002, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-08T19:50:12,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41773-0x1011bffb7800003, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:12,132 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:46325-0x1011bffb7800002, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:12,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:12,133 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-08T19:50:12,133 INFO [master/692b295ad45c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/692b295ad45c,36033,1731095411695 from backup master directory 2024-11-08T19:50:12,136 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1011bffb7800001, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-08T19:50:12,136 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1011bffb7800001, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:12,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46325-0x1011bffb7800002, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T19:50:12,142 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1011bffb7800001, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T19:50:12,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41773-0x1011bffb7800003, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T19:50:12,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/692b295ad45c,36033,1731095411695 2024-11-08T19:50:12,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T19:50:12,142 WARN [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
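[annotation] The /hbase/master and /hbase/backup-masters znode events above are the master election: the master creates an ephemeral /hbase/master znode, removes its backup-masters entry, and the region servers see NodeCreated. A simplified sketch of ephemeral-znode election with the plain ZooKeeper client follows; this is not HBase's ActiveMasterManager, it assumes the org.apache.zookeeper client library on the classpath and an existing /hbase parent znode, and the ensemble address and server name are taken from the log purely as examples.

    import java.nio.charset.StandardCharsets;
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooDefs.Ids;
    import org.apache.zookeeper.ZooKeeper;

    // Ephemeral-znode leader election sketch.
    public class MasterElectionSketch {
        public static void main(String[] args) throws Exception {
            CountDownLatch connected = new CountDownLatch(1);
            ZooKeeper zk = new ZooKeeper("127.0.0.1:50222", 30000, event -> {
                if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                    connected.countDown();
                }
            });
            connected.await();

            byte[] serverName = "692b295ad45c,36033,1731095411695".getBytes(StandardCharsets.UTF_8);
            try {
                // Ephemeral: the znode disappears if this session dies,
                // which is what lets a backup master take over.
                zk.create("/hbase/master", serverName, Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
                System.out.println("became active master");
            } catch (KeeperException.NodeExistsException e) {
                // Someone else is already active; watch the znode for NodeDeleted.
                zk.exists("/hbase/master", event -> {
                    if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
                        System.out.println("active master gone, retry election");
                    }
                });
                System.out.println("standing by as backup master");
                // A real implementation would block here and re-run the election on NodeDeleted.
            }
        }
    }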
2024-11-08T19:50:12,142 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(347): regionserver:41507-0x1011bffb7800001, quorum=127.0.0.1:50222, baseZNode=/hbase Unable to get data of znode /hbase/backup-masters/692b295ad45c,36033,1731095411695 because node does not exist (not an error) 2024-11-08T19:50:12,142 INFO [master/692b295ad45c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=692b295ad45c,36033,1731095411695 2024-11-08T19:50:12,149 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/hbase.id] with ID: 55bb90ae-6f88-4088-93c8-59fb9a0b3839 2024-11-08T19:50:12,149 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/.tmp/hbase.id 2024-11-08T19:50:12,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37483 is added to blk_1073741826_1002 (size=42) 2024-11-08T19:50:12,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45381 is added to blk_1073741826_1002 (size=42) 2024-11-08T19:50:12,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42907 is added to blk_1073741826_1002 (size=42) 2024-11-08T19:50:12,162 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/.tmp/hbase.id]:[hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/hbase.id] 2024-11-08T19:50:12,197 INFO [master/692b295ad45c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T19:50:12,197 INFO [master/692b295ad45c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-08T19:50:12,200 INFO [master/692b295ad45c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 3ms. 
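[annotation] The FSUtils entries above show the cluster ID being written to a temporary location and then moved to its final path, so readers never observe a partially written hbase.id. A minimal sketch of that write-temp-then-rename pattern using the local java.nio.file API (standing in for the HDFS FileSystem API; the directory and file names below are examples):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;
    import java.util.UUID;

    // Write the full content to a temporary file, then publish it with an atomic move.
    public class ClusterIdFileSketch {
        public static void main(String[] args) throws IOException {
            Path dir = Files.createTempDirectory("hbase-rootdir");
            Path tmp = dir.resolve(".tmp-hbase.id");
            Path target = dir.resolve("hbase.id");

            String clusterId = UUID.randomUUID().toString();
            Files.writeString(tmp, clusterId);                       // write everything first
            Files.move(tmp, target, StandardCopyOption.ATOMIC_MOVE); // then move into place atomically

            System.out.println("cluster id: " + Files.readString(target));
        }
    }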
2024-11-08T19:50:12,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46325-0x1011bffb7800002, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:12,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41773-0x1011bffb7800003, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:12,216 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1011bffb7800001, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:12,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:12,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37483 is added to blk_1073741827_1003 (size=196) 2024-11-08T19:50:12,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42907 is added to blk_1073741827_1003 (size=196) 2024-11-08T19:50:12,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45381 is added to blk_1073741827_1003 (size=196) 2024-11-08T19:50:12,245 INFO [master/692b295ad45c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-08T19:50:12,246 INFO [master/692b295ad45c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-08T19:50:12,246 INFO [master/692b295ad45c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T19:50:12,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45381 is 
added to blk_1073741828_1004 (size=1189) 2024-11-08T19:50:12,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37483 is added to blk_1073741828_1004 (size=1189) 2024-11-08T19:50:12,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42907 is added to blk_1073741828_1004 (size=1189) 2024-11-08T19:50:12,276 INFO [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/MasterData/data/master/store 2024-11-08T19:50:12,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45381 is added to blk_1073741829_1005 (size=34) 2024-11-08T19:50:12,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42907 is added to blk_1073741829_1005 (size=34) 2024-11-08T19:50:12,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37483 is added to blk_1073741829_1005 (size=34) 2024-11-08T19:50:12,296 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T19:50:12,297 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-08T19:50:12,297 INFO [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T19:50:12,297 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-08T19:50:12,297 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-08T19:50:12,297 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T19:50:12,297 INFO [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T19:50:12,297 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731095412297Disabling compacts and flushes for region at 1731095412297Disabling writes for close at 1731095412297Writing region close event to WAL at 1731095412297Closed at 1731095412297 2024-11-08T19:50:12,298 WARN [master/692b295ad45c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/MasterData/data/master/store/.initializing 2024-11-08T19:50:12,298 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/MasterData/WALs/692b295ad45c,36033,1731095411695 2024-11-08T19:50:12,303 INFO [master/692b295ad45c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=692b295ad45c%2C36033%2C1731095411695, suffix=, logDir=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/MasterData/WALs/692b295ad45c,36033,1731095411695, archiveDir=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/MasterData/oldWALs, maxLogs=10 2024-11-08T19:50:12,304 INFO [master/692b295ad45c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 692b295ad45c%2C36033%2C1731095411695.1731095412303 2024-11-08T19:50:12,324 INFO [master/692b295ad45c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/MasterData/WALs/692b295ad45c,36033,1731095411695/692b295ad45c%2C36033%2C1731095411695.1731095412303 2024-11-08T19:50:12,332 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43819:43819),(127.0.0.1/127.0.0.1:45311:45311),(127.0.0.1/127.0.0.1:41401:41401)] 2024-11-08T19:50:12,335 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-08T19:50:12,335 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T19:50:12,335 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T19:50:12,335 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T19:50:12,338 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T19:50:12,340 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-08T19:50:12,340 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T19:50:12,341 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T19:50:12,341 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T19:50:12,344 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-08T19:50:12,344 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T19:50:12,345 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T19:50:12,345 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T19:50:12,348 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-08T19:50:12,349 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T19:50:12,350 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T19:50:12,350 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T19:50:12,352 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-08T19:50:12,352 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T19:50:12,353 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T19:50:12,354 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T19:50:12,355 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-08T19:50:12,356 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-08T19:50:12,358 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T19:50:12,358 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T19:50:12,359 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-08T19:50:12,361 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T19:50:12,364 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T19:50:12,365 INFO [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66892442, jitterRate=-0.0032249391078948975}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-08T19:50:12,366 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731095412335Initializing all the Stores at 1731095412337 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731095412337Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731095412338 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731095412338Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731095412338Cleaning up temporary data from old regions at 1731095412358 (+20 ms)Region opened successfully at 1731095412366 (+8 ms) 2024-11-08T19:50:12,366 INFO [master/692b295ad45c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-08T19:50:12,373 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bbd94f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=692b295ad45c/172.17.0.2:0 2024-11-08T19:50:12,374 INFO [master/692b295ad45c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-08T19:50:12,375 INFO [master/692b295ad45c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-08T19:50:12,375 INFO [master/692b295ad45c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-08T19:50:12,375 INFO [master/692b295ad45c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-08T19:50:12,376 INFO [master/692b295ad45c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-08T19:50:12,377 INFO [master/692b295ad45c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-08T19:50:12,377 INFO [master/692b295ad45c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-08T19:50:12,383 INFO [master/692b295ad45c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
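[annotation] The CompactionConfiguration entries logged earlier in this startup (ratio 1.200000, minFilesToCompact:3, maxFilesToCompact:10) refer to ratio-based store file selection. The sketch below shows only the basic ratio test, not ExploringCompactionPolicy: a file is kept as a candidate if it is no larger than ratio times the sum of the smaller files after it.

    import java.util.ArrayList;
    import java.util.List;

    // Simplified ratio-based compaction selection sketch.
    public class RatioCompactionSketch {
        static List<Long> select(List<Long> fileSizes, double ratio, int minFiles, int maxFiles) {
            // fileSizes is assumed sorted largest/oldest-first.
            int start = 0;
            while (start < fileSizes.size()) {
                long sumOfRest = 0;
                for (int i = start + 1; i < fileSizes.size(); i++) {
                    sumOfRest += fileSizes.get(i);
                }
                if (fileSizes.get(start) <= ratio * sumOfRest) {
                    break;   // this file and everything after it form the candidate set
                }
                start++;     // file too large relative to the rest: leave it out
            }
            List<Long> candidates = new ArrayList<>(fileSizes.subList(start, fileSizes.size()));
            if (candidates.size() < minFiles) {
                return List.of();                              // not worth compacting yet
            }
            if (candidates.size() > maxFiles) {
                candidates = candidates.subList(0, maxFiles);  // cap files per compaction
            }
            return candidates;
        }

        public static void main(String[] args) {
            // 500 MB exceeds 1.2 * (40+30+20+10) MB, so it is skipped; the rest are selected.
            System.out.println(select(List.of(500L, 40L, 30L, 20L, 10L), 1.2, 3, 10));
        }
    }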
2024-11-08T19:50:12,384 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-08T19:50:12,421 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-08T19:50:12,422 INFO [master/692b295ad45c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-08T19:50:12,423 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-08T19:50:12,432 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-08T19:50:12,433 INFO [master/692b295ad45c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-08T19:50:12,434 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-08T19:50:12,453 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-08T19:50:12,455 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-08T19:50:12,463 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-08T19:50:12,467 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-08T19:50:12,478 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-08T19:50:12,489 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1011bffb7800001, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T19:50:12,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T19:50:12,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46325-0x1011bffb7800002, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T19:50:12,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41773-0x1011bffb7800003, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-11-08T19:50:12,489 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1011bffb7800001, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:12,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41773-0x1011bffb7800003, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:12,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46325-0x1011bffb7800002, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:12,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:12,490 INFO [master/692b295ad45c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=692b295ad45c,36033,1731095411695, sessionid=0x1011bffb7800000, setting cluster-up flag (Was=false) 2024-11-08T19:50:12,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46325-0x1011bffb7800002, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:12,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41773-0x1011bffb7800003, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:12,510 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1011bffb7800001, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:12,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:12,552 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-08T19:50:12,554 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=692b295ad45c,36033,1731095411695 2024-11-08T19:50:12,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46325-0x1011bffb7800002, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:12,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:12,584 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1011bffb7800001, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:12,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:41773-0x1011bffb7800003, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:12,626 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-08T19:50:12,627 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=692b295ad45c,36033,1731095411695 2024-11-08T19:50:12,629 INFO [master/692b295ad45c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-08T19:50:12,632 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-08T19:50:12,632 INFO [master/692b295ad45c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-08T19:50:12,632 INFO [master/692b295ad45c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
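[annotation] The StochasticLoadBalancer entry above lists its cost functions and their multipliers. The sketch below illustrates the general weighted-sum idea only: each cost function yields a value in [0, 1], and the overall cost is the multiplier-weighted average. The interface and names are illustrative, not HBase's CostFunction API, and the sample multipliers and costs are made up.

    // Weighted-sum cost sketch in the spirit of the balancer configuration above.
    public class WeightedCostSketch {
        interface CostFunction {
            double multiplier();
            double cost();       // expected to be normalized to [0, 1]
        }

        static double overallCost(CostFunction[] functions) {
            double weighted = 0, totalMultiplier = 0;
            for (CostFunction f : functions) {
                if (f.multiplier() <= 0) continue;      // disabled cost functions contribute nothing
                weighted += f.multiplier() * f.cost();
                totalMultiplier += f.multiplier();
            }
            return totalMultiplier == 0 ? 0 : weighted / totalMultiplier;
        }

        public static void main(String[] args) {
            CostFunction regionCountSkew = new CostFunction() {
                public double multiplier() { return 500; }
                public double cost() { return 0.2; }    // pretend 20% skew in region counts
            };
            CostFunction moveCost = new CostFunction() {
                public double multiplier() { return 7; }
                public double cost() { return 0.05; }   // pretend very few moves proposed
            };
            System.out.println(overallCost(new CostFunction[] { regionCountSkew, moveCost }));
        }
    }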
2024-11-08T19:50:12,632 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 692b295ad45c,36033,1731095411695 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-08T19:50:12,636 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/692b295ad45c:0, corePoolSize=5, maxPoolSize=5 2024-11-08T19:50:12,636 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/692b295ad45c:0, corePoolSize=5, maxPoolSize=5 2024-11-08T19:50:12,637 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/692b295ad45c:0, corePoolSize=5, maxPoolSize=5 2024-11-08T19:50:12,637 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/692b295ad45c:0, corePoolSize=5, maxPoolSize=5 2024-11-08T19:50:12,637 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/692b295ad45c:0, corePoolSize=10, maxPoolSize=10 2024-11-08T19:50:12,637 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,637 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/692b295ad45c:0, corePoolSize=2, maxPoolSize=2 2024-11-08T19:50:12,637 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,640 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T19:50:12,640 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-08T19:50:12,642 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T19:50:12,642 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-08T19:50:12,644 INFO [master/692b295ad45c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731095442644 2024-11-08T19:50:12,644 INFO [master/692b295ad45c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-08T19:50:12,644 INFO [master/692b295ad45c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-08T19:50:12,645 INFO [master/692b295ad45c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-08T19:50:12,645 INFO [master/692b295ad45c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-08T19:50:12,645 INFO [master/692b295ad45c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-08T19:50:12,645 INFO [master/692b295ad45c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-08T19:50:12,646 INFO [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-08T19:50:12,646 INFO [master/692b295ad45c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-08T19:50:12,646 INFO [master/692b295ad45c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-08T19:50:12,646 INFO [master/692b295ad45c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-08T19:50:12,647 INFO [master/692b295ad45c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-08T19:50:12,647 INFO [master/692b295ad45c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-08T19:50:12,647 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/692b295ad45c:0:becomeActiveMaster-HFileCleaner.large.0-1731095412647,5,FailOnTimeoutGroup] 2024-11-08T19:50:12,647 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/692b295ad45c:0:becomeActiveMaster-HFileCleaner.small.0-1731095412647,5,FailOnTimeoutGroup] 2024-11-08T19:50:12,647 INFO [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:12,647 INFO [master/692b295ad45c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-08T19:50:12,647 INFO [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:12,647 INFO [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
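The HMaster line above reports the high-storeFileRefCount recovery feature as disabled because hbase.regions.recovery.store.file.ref.count is unset. A hedged sketch of turning it on programmatically; the key is copied from the log message, the threshold of 3 is an arbitrary illustrative value, and the same key could equally be set in hbase-site.xml.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileRefCountConfigSketch {
      public static Configuration withRefCountRecovery() {
        Configuration conf = HBaseConfiguration.create();
        // Per the HMaster message above, any value > 0 enables reopening regions
        // with a very high store file reference count; 3 is purely illustrative.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
        return conf;
      }
    }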
2024-11-08T19:50:12,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42907 is added to blk_1073741831_1007 (size=1321) 2024-11-08T19:50:12,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45381 is added to blk_1073741831_1007 (size=1321) 2024-11-08T19:50:12,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37483 is added to blk_1073741831_1007 (size=1321) 2024-11-08T19:50:12,660 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-08T19:50:12,660 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1 2024-11-08T19:50:12,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42907 is added to blk_1073741832_1008 (size=32) 2024-11-08T19:50:12,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37483 is added to blk_1073741832_1008 (size=32) 2024-11-08T19:50:12,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45381 is added to blk_1073741832_1008 (size=32) 2024-11-08T19:50:12,677 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T19:50:12,679 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-08T19:50:12,681 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-08T19:50:12,681 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T19:50:12,682 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T19:50:12,682 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-08T19:50:12,684 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-08T19:50:12,684 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T19:50:12,685 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T19:50:12,685 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-08T19:50:12,687 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min 
locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-08T19:50:12,687 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T19:50:12,688 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T19:50:12,688 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-08T19:50:12,690 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-08T19:50:12,690 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T19:50:12,691 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T19:50:12,691 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-08T19:50:12,692 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/hbase/meta/1588230740 2024-11-08T19:50:12,693 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/hbase/meta/1588230740 2024-11-08T19:50:12,694 INFO [RS:2;692b295ad45c:41773 {}] regionserver.HRegionServer(746): ClusterId : 55bb90ae-6f88-4088-93c8-59fb9a0b3839 2024-11-08T19:50:12,694 INFO [RS:0;692b295ad45c:41507 {}] regionserver.HRegionServer(746): ClusterId : 55bb90ae-6f88-4088-93c8-59fb9a0b3839 2024-11-08T19:50:12,694 INFO [RS:1;692b295ad45c:46325 {}] regionserver.HRegionServer(746): ClusterId : 55bb90ae-6f88-4088-93c8-59fb9a0b3839 2024-11-08T19:50:12,694 DEBUG [RS:1;692b295ad45c:46325 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 
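The CompactionConfiguration dumps above (minCompactSize:128 MB, minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0) reflect the usual store-compaction tuning knobs. A sketch that restates those logged values as configuration, assuming the standard hbase.hstore.compaction.* property names; the values themselves come from the log, the property names are an assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static Configuration valuesSeenInLog() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);            // minFilesToCompact in the dump
        conf.setInt("hbase.hstore.compaction.max", 10);           // maxFilesToCompact in the dump
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);     // selection ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
        return conf;
      }
    }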
2024-11-08T19:50:12,694 DEBUG [RS:0;692b295ad45c:41507 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-08T19:50:12,694 DEBUG [RS:2;692b295ad45c:41773 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-08T19:50:12,697 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-08T19:50:12,697 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-08T19:50:12,697 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-08T19:50:12,699 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-08T19:50:12,705 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T19:50:12,706 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74832648, jitterRate=0.11509335041046143}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-08T19:50:12,707 DEBUG [RS:1;692b295ad45c:46325 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-08T19:50:12,707 DEBUG [RS:1;692b295ad45c:46325 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-08T19:50:12,707 DEBUG [RS:2;692b295ad45c:41773 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-08T19:50:12,707 DEBUG [RS:2;692b295ad45c:41773 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-08T19:50:12,707 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731095412677Initializing all the Stores at 1731095412678 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731095412678Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731095412679 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731095412679Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731095412679Cleaning up temporary data from old regions at 1731095412697 (+18 ms)Region opened successfully at 1731095412707 (+10 ms) 2024-11-08T19:50:12,708 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-08T19:50:12,708 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-08T19:50:12,708 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-08T19:50:12,708 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-08T19:50:12,708 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-08T19:50:12,708 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-08T19:50:12,708 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731095412707Disabling compacts and flushes for region at 1731095412707Disabling writes for close at 1731095412708 (+1 ms)Writing region close event to WAL at 1731095412708Closed at 1731095412708 2024-11-08T19:50:12,710 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T19:50:12,710 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-08T19:50:12,710 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-08T19:50:12,712 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-08T19:50:12,714 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-08T19:50:12,727 DEBUG [RS:0;692b295ad45c:41507 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-08T19:50:12,727 DEBUG [RS:0;692b295ad45c:41507 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-08T19:50:12,727 DEBUG [RS:1;692b295ad45c:46325 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-08T19:50:12,728 DEBUG [RS:1;692b295ad45c:46325 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ef5930, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=692b295ad45c/172.17.0.2:0 2024-11-08T19:50:12,737 DEBUG [RS:2;692b295ad45c:41773 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-08T19:50:12,738 DEBUG 
[RS:2;692b295ad45c:41773 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59237c84, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=692b295ad45c/172.17.0.2:0 2024-11-08T19:50:12,738 DEBUG [RS:0;692b295ad45c:41507 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-08T19:50:12,738 DEBUG [RS:0;692b295ad45c:41507 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67b5d40e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=692b295ad45c/172.17.0.2:0 2024-11-08T19:50:12,740 DEBUG [RS:1;692b295ad45c:46325 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;692b295ad45c:46325 2024-11-08T19:50:12,740 INFO [RS:1;692b295ad45c:46325 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-08T19:50:12,740 INFO [RS:1;692b295ad45c:46325 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-08T19:50:12,740 DEBUG [RS:1;692b295ad45c:46325 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-08T19:50:12,741 INFO [RS:1;692b295ad45c:46325 {}] regionserver.HRegionServer(2659): reportForDuty to master=692b295ad45c,36033,1731095411695 with port=46325, startcode=1731095411998 2024-11-08T19:50:12,741 DEBUG [RS:1;692b295ad45c:46325 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-08T19:50:12,745 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51203, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-08T19:50:12,746 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36033 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 692b295ad45c,46325,1731095411998 2024-11-08T19:50:12,746 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36033 {}] master.ServerManager(517): Registering regionserver=692b295ad45c,46325,1731095411998 2024-11-08T19:50:12,749 DEBUG [RS:1;692b295ad45c:46325 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1 2024-11-08T19:50:12,749 DEBUG [RS:1;692b295ad45c:46325 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43011 2024-11-08T19:50:12,749 DEBUG [RS:1;692b295ad45c:46325 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-08T19:50:12,750 DEBUG [RS:0;692b295ad45c:41507 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;692b295ad45c:41507 2024-11-08T19:50:12,750 DEBUG [RS:2;692b295ad45c:41773 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;692b295ad45c:41773 2024-11-08T19:50:12,751 INFO [RS:2;692b295ad45c:41773 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-08T19:50:12,751 INFO [RS:2;692b295ad45c:41773 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-08T19:50:12,751 INFO [RS:0;692b295ad45c:41507 {}] 
regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-08T19:50:12,751 DEBUG [RS:2;692b295ad45c:41773 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-08T19:50:12,751 INFO [RS:0;692b295ad45c:41507 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-08T19:50:12,751 DEBUG [RS:0;692b295ad45c:41507 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-08T19:50:12,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T19:50:12,759 INFO [RS:0;692b295ad45c:41507 {}] regionserver.HRegionServer(2659): reportForDuty to master=692b295ad45c,36033,1731095411695 with port=41507, startcode=1731095411907 2024-11-08T19:50:12,759 INFO [RS:2;692b295ad45c:41773 {}] regionserver.HRegionServer(2659): reportForDuty to master=692b295ad45c,36033,1731095411695 with port=41773, startcode=1731095412053 2024-11-08T19:50:12,759 DEBUG [RS:0;692b295ad45c:41507 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-08T19:50:12,759 DEBUG [RS:2;692b295ad45c:41773 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-08T19:50:12,759 DEBUG [RS:1;692b295ad45c:46325 {}] zookeeper.ZKUtil(111): regionserver:46325-0x1011bffb7800002, quorum=127.0.0.1:50222, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/692b295ad45c,46325,1731095411998 2024-11-08T19:50:12,759 WARN [RS:1;692b295ad45c:46325 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-08T19:50:12,759 INFO [RS:1;692b295ad45c:46325 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T19:50:12,759 DEBUG [RS:1;692b295ad45c:46325 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/WALs/692b295ad45c,46325,1731095411998 2024-11-08T19:50:12,760 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [692b295ad45c,46325,1731095411998] 2024-11-08T19:50:12,761 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32791, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-08T19:50:12,761 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59979, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-08T19:50:12,762 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36033 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 692b295ad45c,41773,1731095412053 2024-11-08T19:50:12,762 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36033 {}] master.ServerManager(517): Registering regionserver=692b295ad45c,41773,1731095412053 2024-11-08T19:50:12,764 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36033 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 692b295ad45c,41507,1731095411907 2024-11-08T19:50:12,764 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36033 {}] master.ServerManager(517): Registering regionserver=692b295ad45c,41507,1731095411907 2024-11-08T19:50:12,764 DEBUG [RS:2;692b295ad45c:41773 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1 2024-11-08T19:50:12,764 DEBUG [RS:2;692b295ad45c:41773 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43011 2024-11-08T19:50:12,765 DEBUG [RS:2;692b295ad45c:41773 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-08T19:50:12,765 INFO [RS:1;692b295ad45c:46325 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-08T19:50:12,768 INFO [RS:1;692b295ad45c:46325 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-08T19:50:12,768 DEBUG [RS:0;692b295ad45c:41507 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1 2024-11-08T19:50:12,768 DEBUG [RS:0;692b295ad45c:41507 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43011 2024-11-08T19:50:12,768 DEBUG [RS:0;692b295ad45c:41507 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-08T19:50:12,769 INFO [RS:1;692b295ad45c:46325 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-08T19:50:12,769 INFO [RS:1;692b295ad45c:46325 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is 
enabled. 2024-11-08T19:50:12,769 INFO [RS:1;692b295ad45c:46325 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-08T19:50:12,771 INFO [RS:1;692b295ad45c:46325 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-08T19:50:12,771 INFO [RS:1;692b295ad45c:46325 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:12,771 DEBUG [RS:1;692b295ad45c:46325 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,771 DEBUG [RS:1;692b295ad45c:46325 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,771 DEBUG [RS:1;692b295ad45c:46325 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,771 DEBUG [RS:1;692b295ad45c:46325 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,771 DEBUG [RS:1;692b295ad45c:46325 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,771 DEBUG [RS:1;692b295ad45c:46325 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/692b295ad45c:0, corePoolSize=2, maxPoolSize=2 2024-11-08T19:50:12,771 DEBUG [RS:1;692b295ad45c:46325 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,771 DEBUG [RS:1;692b295ad45c:46325 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,771 DEBUG [RS:1;692b295ad45c:46325 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,771 DEBUG [RS:1;692b295ad45c:46325 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,771 DEBUG [RS:1;692b295ad45c:46325 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,772 DEBUG [RS:1;692b295ad45c:46325 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,772 DEBUG [RS:1;692b295ad45c:46325 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/692b295ad45c:0, corePoolSize=3, maxPoolSize=3 2024-11-08T19:50:12,772 DEBUG [RS:1;692b295ad45c:46325 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0, corePoolSize=3, maxPoolSize=3 2024-11-08T19:50:12,773 INFO [RS:1;692b295ad45c:46325 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
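The ChoreService lines above show the region server scheduling periodic chores (CompactionChecker every 1000 ms, CompactionThroughputTuner every 60000 ms, and so on). A rough sketch of the ScheduledChore/ChoreService pattern behind those messages; the constructor shapes are an assumption about the public org.apache.hadoop.hbase API, and the chore name and body are purely illustrative.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // A periodic task; scheduling it is what produces lines like
        // "Chore ScheduledChore name=..., period=1000, unit=MILLISECONDS is enabled."
        ScheduledChore checker = new ScheduledChore("DemoChecker", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("periodic check");
          }
        };
        ChoreService service = new ChoreService("demo");
        service.scheduleChore(checker);
        // ... later, on shutdown: service.shutdown();
      }
    }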
2024-11-08T19:50:12,774 INFO [RS:1;692b295ad45c:46325 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:12,774 INFO [RS:1;692b295ad45c:46325 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:12,774 INFO [RS:1;692b295ad45c:46325 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:12,774 INFO [RS:1;692b295ad45c:46325 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:12,774 INFO [RS:1;692b295ad45c:46325 {}] hbase.ChoreService(168): Chore ScheduledChore name=692b295ad45c,46325,1731095411998-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T19:50:12,796 INFO [RS:1;692b295ad45c:46325 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-08T19:50:12,796 INFO [RS:1;692b295ad45c:46325 {}] hbase.ChoreService(168): Chore ScheduledChore name=692b295ad45c,46325,1731095411998-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:12,797 INFO [RS:1;692b295ad45c:46325 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:12,797 INFO [RS:1;692b295ad45c:46325 {}] regionserver.Replication(171): 692b295ad45c,46325,1731095411998 started 2024-11-08T19:50:12,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T19:50:12,814 INFO [RS:1;692b295ad45c:46325 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:12,814 INFO [RS:1;692b295ad45c:46325 {}] regionserver.HRegionServer(1482): Serving as 692b295ad45c,46325,1731095411998, RpcServer on 692b295ad45c/172.17.0.2:46325, sessionid=0x1011bffb7800002 2024-11-08T19:50:12,814 DEBUG [RS:1;692b295ad45c:46325 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-08T19:50:12,814 DEBUG [RS:1;692b295ad45c:46325 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 692b295ad45c,46325,1731095411998 2024-11-08T19:50:12,814 DEBUG [RS:1;692b295ad45c:46325 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '692b295ad45c,46325,1731095411998' 2024-11-08T19:50:12,814 DEBUG [RS:1;692b295ad45c:46325 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-08T19:50:12,826 DEBUG [RS:2;692b295ad45c:41773 {}] zookeeper.ZKUtil(111): regionserver:41773-0x1011bffb7800003, quorum=127.0.0.1:50222, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/692b295ad45c,41773,1731095412053 2024-11-08T19:50:12,826 DEBUG [RS:1;692b295ad45c:46325 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-08T19:50:12,826 WARN [RS:2;692b295ad45c:41773 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-08T19:50:12,826 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [692b295ad45c,41773,1731095412053] 2024-11-08T19:50:12,826 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [692b295ad45c,41507,1731095411907] 2024-11-08T19:50:12,826 INFO [RS:2;692b295ad45c:41773 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T19:50:12,826 DEBUG [RS:0;692b295ad45c:41507 {}] zookeeper.ZKUtil(111): regionserver:41507-0x1011bffb7800001, quorum=127.0.0.1:50222, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/692b295ad45c,41507,1731095411907 2024-11-08T19:50:12,826 WARN [RS:0;692b295ad45c:41507 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-08T19:50:12,826 DEBUG [RS:2;692b295ad45c:41773 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/WALs/692b295ad45c,41773,1731095412053 2024-11-08T19:50:12,826 INFO [RS:0;692b295ad45c:41507 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T19:50:12,826 DEBUG [RS:1;692b295ad45c:46325 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-08T19:50:12,826 DEBUG [RS:1;692b295ad45c:46325 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-08T19:50:12,827 DEBUG [RS:1;692b295ad45c:46325 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 692b295ad45c,46325,1731095411998 2024-11-08T19:50:12,827 DEBUG [RS:0;692b295ad45c:41507 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/WALs/692b295ad45c,41507,1731095411907 2024-11-08T19:50:12,827 DEBUG [RS:1;692b295ad45c:46325 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '692b295ad45c,46325,1731095411998' 2024-11-08T19:50:12,827 DEBUG [RS:1;692b295ad45c:46325 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-08T19:50:12,827 DEBUG [RS:1;692b295ad45c:46325 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-08T19:50:12,828 DEBUG [RS:1;692b295ad45c:46325 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-08T19:50:12,828 INFO [RS:1;692b295ad45c:46325 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-08T19:50:12,828 INFO [RS:1;692b295ad45c:46325 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
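Both quota managers above report "Quota support disabled". A minimal sketch of the switch that would start them, assuming the standard hbase.quota.enabled key (the key itself does not appear in the log); whether the space quota manager also starts then depends on the rest of the cluster setup.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaSwitchSketch {
      public static Configuration withQuotas() {
        Configuration conf = HBaseConfiguration.create();
        // Left at its default (false), the region server logs
        // "Quota support disabled" as seen above.
        conf.setBoolean("hbase.quota.enabled", true);
        return conf;
      }
    }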
2024-11-08T19:50:12,830 INFO [RS:0;692b295ad45c:41507 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-08T19:50:12,830 INFO [RS:2;692b295ad45c:41773 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-08T19:50:12,834 INFO [RS:2;692b295ad45c:41773 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-08T19:50:12,834 INFO [RS:0;692b295ad45c:41507 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-08T19:50:12,834 INFO [RS:2;692b295ad45c:41773 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-08T19:50:12,834 INFO [RS:2;692b295ad45c:41773 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:12,834 INFO [RS:0;692b295ad45c:41507 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-08T19:50:12,834 INFO [RS:0;692b295ad45c:41507 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:12,835 INFO [RS:2;692b295ad45c:41773 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-08T19:50:12,835 INFO [RS:0;692b295ad45c:41507 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-08T19:50:12,836 INFO [RS:2;692b295ad45c:41773 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-08T19:50:12,836 INFO [RS:2;692b295ad45c:41773 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:12,836 INFO [RS:0;692b295ad45c:41507 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-08T19:50:12,836 INFO [RS:0;692b295ad45c:41507 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
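The MemStoreFlusher lines above report globalMemStoreLimit=880 M with a low mark of 836 M, i.e. the low mark is 95% of the limit. A sketch of the two knobs that usually drive those numbers, assuming the property names hbase.regionserver.global.memstore.size and its .lower.limit companion (neither name appears in the log); the 0.4 heap fraction is the conventional default, not a value read from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitSketch {
      public static Configuration withMemStoreLimits() {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the region server heap usable by all memstores combined
        // (880 MB in this run), and the flush low-water mark as a fraction of
        // that limit (836/880 = 0.95, matching globalMemStoreLimitLowMark above).
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        return conf;
      }
    }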
2024-11-08T19:50:12,836 DEBUG [RS:2;692b295ad45c:41773 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,836 DEBUG [RS:0;692b295ad45c:41507 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,836 DEBUG [RS:2;692b295ad45c:41773 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,836 DEBUG [RS:0;692b295ad45c:41507 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,836 DEBUG [RS:2;692b295ad45c:41773 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,836 DEBUG [RS:0;692b295ad45c:41507 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,836 DEBUG [RS:2;692b295ad45c:41773 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,836 DEBUG [RS:0;692b295ad45c:41507 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,836 DEBUG [RS:2;692b295ad45c:41773 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,836 DEBUG [RS:0;692b295ad45c:41507 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,836 DEBUG [RS:2;692b295ad45c:41773 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/692b295ad45c:0, corePoolSize=2, maxPoolSize=2 2024-11-08T19:50:12,837 DEBUG [RS:0;692b295ad45c:41507 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/692b295ad45c:0, corePoolSize=2, maxPoolSize=2 2024-11-08T19:50:12,837 DEBUG [RS:2;692b295ad45c:41773 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,837 DEBUG [RS:0;692b295ad45c:41507 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,837 DEBUG [RS:2;692b295ad45c:41773 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,837 DEBUG [RS:0;692b295ad45c:41507 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,837 DEBUG [RS:2;692b295ad45c:41773 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,837 DEBUG [RS:0;692b295ad45c:41507 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/692b295ad45c:0, 
corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,837 DEBUG [RS:2;692b295ad45c:41773 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,837 DEBUG [RS:0;692b295ad45c:41507 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,837 DEBUG [RS:2;692b295ad45c:41773 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,837 DEBUG [RS:0;692b295ad45c:41507 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,837 DEBUG [RS:2;692b295ad45c:41773 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,837 DEBUG [RS:0;692b295ad45c:41507 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/692b295ad45c:0, corePoolSize=1, maxPoolSize=1 2024-11-08T19:50:12,837 DEBUG [RS:2;692b295ad45c:41773 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/692b295ad45c:0, corePoolSize=3, maxPoolSize=3 2024-11-08T19:50:12,837 DEBUG [RS:0;692b295ad45c:41507 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/692b295ad45c:0, corePoolSize=3, maxPoolSize=3 2024-11-08T19:50:12,837 DEBUG [RS:2;692b295ad45c:41773 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0, corePoolSize=3, maxPoolSize=3 2024-11-08T19:50:12,837 DEBUG [RS:0;692b295ad45c:41507 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0, corePoolSize=3, maxPoolSize=3 2024-11-08T19:50:12,838 INFO [RS:2;692b295ad45c:41773 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:12,838 INFO [RS:0;692b295ad45c:41507 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:12,838 INFO [RS:2;692b295ad45c:41773 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:12,838 INFO [RS:0;692b295ad45c:41507 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:12,838 INFO [RS:2;692b295ad45c:41773 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:12,838 INFO [RS:0;692b295ad45c:41507 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:12,838 INFO [RS:0;692b295ad45c:41507 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:12,838 INFO [RS:2;692b295ad45c:41773 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-08T19:50:12,838 INFO [RS:0;692b295ad45c:41507 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:12,838 INFO [RS:2;692b295ad45c:41773 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:12,838 INFO [RS:0;692b295ad45c:41507 {}] hbase.ChoreService(168): Chore ScheduledChore name=692b295ad45c,41507,1731095411907-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T19:50:12,838 INFO [RS:2;692b295ad45c:41773 {}] hbase.ChoreService(168): Chore ScheduledChore name=692b295ad45c,41773,1731095412053-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T19:50:12,851 INFO [RS:0;692b295ad45c:41507 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-08T19:50:12,851 INFO [RS:0;692b295ad45c:41507 {}] hbase.ChoreService(168): Chore ScheduledChore name=692b295ad45c,41507,1731095411907-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:12,852 INFO [RS:0;692b295ad45c:41507 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:12,852 INFO [RS:0;692b295ad45c:41507 {}] regionserver.Replication(171): 692b295ad45c,41507,1731095411907 started 2024-11-08T19:50:12,859 INFO [RS:2;692b295ad45c:41773 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-08T19:50:12,859 INFO [RS:2;692b295ad45c:41773 {}] hbase.ChoreService(168): Chore ScheduledChore name=692b295ad45c,41773,1731095412053-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:12,859 INFO [RS:2;692b295ad45c:41773 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T19:50:12,859 INFO [RS:2;692b295ad45c:41773 {}] regionserver.Replication(171): 692b295ad45c,41773,1731095412053 started 2024-11-08T19:50:12,864 WARN [692b295ad45c:36033 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-08T19:50:12,864 INFO [RS:0;692b295ad45c:41507 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-08T19:50:12,864 INFO [RS:0;692b295ad45c:41507 {}] regionserver.HRegionServer(1482): Serving as 692b295ad45c,41507,1731095411907, RpcServer on 692b295ad45c/172.17.0.2:41507, sessionid=0x1011bffb7800001 2024-11-08T19:50:12,865 DEBUG [RS:0;692b295ad45c:41507 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-08T19:50:12,865 DEBUG [RS:0;692b295ad45c:41507 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 692b295ad45c,41507,1731095411907 2024-11-08T19:50:12,865 DEBUG [RS:0;692b295ad45c:41507 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '692b295ad45c,41507,1731095411907' 2024-11-08T19:50:12,865 DEBUG [RS:0;692b295ad45c:41507 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-08T19:50:12,865 DEBUG [RS:0;692b295ad45c:41507 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-08T19:50:12,866 DEBUG [RS:0;692b295ad45c:41507 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-08T19:50:12,866 DEBUG [RS:0;692b295ad45c:41507 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-08T19:50:12,866 DEBUG [RS:0;692b295ad45c:41507 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 692b295ad45c,41507,1731095411907 2024-11-08T19:50:12,866 DEBUG [RS:0;692b295ad45c:41507 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '692b295ad45c,41507,1731095411907' 2024-11-08T19:50:12,866 DEBUG [RS:0;692b295ad45c:41507 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-08T19:50:12,866 DEBUG [RS:0;692b295ad45c:41507 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-08T19:50:12,867 DEBUG [RS:0;692b295ad45c:41507 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-08T19:50:12,867 INFO [RS:0;692b295ad45c:41507 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-08T19:50:12,867 INFO [RS:0;692b295ad45c:41507 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-08T19:50:12,878 INFO [RS:2;692b295ad45c:41773 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-08T19:50:12,878 INFO [RS:2;692b295ad45c:41773 {}] regionserver.HRegionServer(1482): Serving as 692b295ad45c,41773,1731095412053, RpcServer on 692b295ad45c/172.17.0.2:41773, sessionid=0x1011bffb7800003 2024-11-08T19:50:12,879 DEBUG [RS:2;692b295ad45c:41773 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-08T19:50:12,879 DEBUG [RS:2;692b295ad45c:41773 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 692b295ad45c,41773,1731095412053 2024-11-08T19:50:12,879 DEBUG [RS:2;692b295ad45c:41773 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '692b295ad45c,41773,1731095412053' 2024-11-08T19:50:12,879 DEBUG [RS:2;692b295ad45c:41773 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-08T19:50:12,879 DEBUG [RS:2;692b295ad45c:41773 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-08T19:50:12,880 DEBUG [RS:2;692b295ad45c:41773 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-08T19:50:12,880 DEBUG [RS:2;692b295ad45c:41773 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-08T19:50:12,880 DEBUG [RS:2;692b295ad45c:41773 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 692b295ad45c,41773,1731095412053 2024-11-08T19:50:12,880 DEBUG [RS:2;692b295ad45c:41773 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '692b295ad45c,41773,1731095412053' 2024-11-08T19:50:12,880 DEBUG [RS:2;692b295ad45c:41773 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-08T19:50:12,881 DEBUG [RS:2;692b295ad45c:41773 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-08T19:50:12,881 DEBUG [RS:2;692b295ad45c:41773 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-08T19:50:12,881 INFO [RS:2;692b295ad45c:41773 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-08T19:50:12,881 INFO [RS:2;692b295ad45c:41773 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
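At this point all three region servers (ports 41507, 46325, 41773) have registered and log "Serving as host,port,startcode". A short client-side sketch of how that membership could be observed, using only the public Connection/Admin/ClusterMetrics API; the configuration comes from whatever hbase-site.xml the client loads, and nothing in the sketch is specific to this test run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class LiveServersSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          // Each entry corresponds to one "Serving as host,port,startcode" line above.
          for (ServerName rs : metrics.getLiveServerMetrics().keySet()) {
            System.out.println("live region server: " + rs);
          }
        }
      }
    }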
2024-11-08T19:50:12,931 INFO [RS:1;692b295ad45c:46325 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=692b295ad45c%2C46325%2C1731095411998, suffix=, logDir=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/WALs/692b295ad45c,46325,1731095411998, archiveDir=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/oldWALs, maxLogs=32
2024-11-08T19:50:12,932 INFO [RS:1;692b295ad45c:46325 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 692b295ad45c%2C46325%2C1731095411998.1731095412931
2024-11-08T19:50:12,940 INFO [RS:1;692b295ad45c:46325 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/WALs/692b295ad45c,46325,1731095411998/692b295ad45c%2C46325%2C1731095411998.1731095412931
2024-11-08T19:50:12,941 DEBUG [RS:1;692b295ad45c:46325 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41401:41401),(127.0.0.1/127.0.0.1:43819:43819),(127.0.0.1/127.0.0.1:45311:45311)]
2024-11-08T19:50:12,970 INFO [RS:0;692b295ad45c:41507 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=692b295ad45c%2C41507%2C1731095411907, suffix=, logDir=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/WALs/692b295ad45c,41507,1731095411907, archiveDir=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/oldWALs, maxLogs=32
2024-11-08T19:50:12,970 INFO [RS:0;692b295ad45c:41507 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 692b295ad45c%2C41507%2C1731095411907.1731095412970
2024-11-08T19:50:12,978 INFO [RS:0;692b295ad45c:41507 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/WALs/692b295ad45c,41507,1731095411907/692b295ad45c%2C41507%2C1731095411907.1731095412970
2024-11-08T19:50:12,979 DEBUG [RS:0;692b295ad45c:41507 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45311:45311),(127.0.0.1/127.0.0.1:43819:43819),(127.0.0.1/127.0.0.1:41401:41401)]
2024-11-08T19:50:12,984 INFO [RS:2;692b295ad45c:41773 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=692b295ad45c%2C41773%2C1731095412053, suffix=, logDir=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/WALs/692b295ad45c,41773,1731095412053, archiveDir=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/oldWALs, maxLogs=32
2024-11-08T19:50:12,985 INFO [RS:2;692b295ad45c:41773 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 692b295ad45c%2C41773%2C1731095412053.1731095412985
2024-11-08T19:50:12,994 INFO [RS:2;692b295ad45c:41773 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/WALs/692b295ad45c,41773,1731095412053/692b295ad45c%2C41773%2C1731095412053.1731095412985
2024-11-08T19:50:12,995 DEBUG [RS:2;692b295ad45c:41773 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41401:41401),(127.0.0.1/127.0.0.1:45311:45311),(127.0.0.1/127.0.0.1:43819:43819)]
2024-11-08T19:50:13,115 DEBUG [692b295ad45c:36033 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=3, allServersCount=3
2024-11-08T19:50:13,115 DEBUG [692b295ad45c:36033 {}] balancer.BalancerClusterState(204): Hosts are {692b295ad45c=0} racks are {/default-rack=0}
2024-11-08T19:50:13,118 DEBUG [692b295ad45c:36033 {}] balancer.BalancerClusterState(303): server 0 has 0 regions
2024-11-08T19:50:13,118 DEBUG [692b295ad45c:36033 {}] balancer.BalancerClusterState(303): server 1 has 0 regions
2024-11-08T19:50:13,118 DEBUG [692b295ad45c:36033 {}] balancer.BalancerClusterState(303): server 2 has 0 regions
2024-11-08T19:50:13,118 DEBUG [692b295ad45c:36033 {}] balancer.BalancerClusterState(310): server 0 is on host 0
2024-11-08T19:50:13,118 DEBUG [692b295ad45c:36033 {}] balancer.BalancerClusterState(310): server 1 is on host 0
2024-11-08T19:50:13,118 DEBUG [692b295ad45c:36033 {}] balancer.BalancerClusterState(310): server 2 is on host 0
2024-11-08T19:50:13,118 INFO [692b295ad45c:36033 {}] balancer.BalancerClusterState(321): server 0 is on rack 0
2024-11-08T19:50:13,118 INFO [692b295ad45c:36033 {}] balancer.BalancerClusterState(321): server 1 is on rack 0
2024-11-08T19:50:13,118 INFO [692b295ad45c:36033 {}] balancer.BalancerClusterState(321): server 2 is on rack 0
2024-11-08T19:50:13,118 DEBUG [692b295ad45c:36033 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1
2024-11-08T19:50:13,118 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=692b295ad45c,41773,1731095412053
2024-11-08T19:50:13,120 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 692b295ad45c,41773,1731095412053, state=OPENING
2024-11-08T19:50:13,131 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-11-08T19:50:13,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-08T19:50:13,142 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1011bffb7800001, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-08T19:50:13,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41773-0x1011bffb7800003, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-08T19:50:13,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46325-0x1011bffb7800002, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-08T19:50:13,142 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-08T19:50:13,142 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-08T19:50:13,142 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-08T19:50:13,143 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=692b295ad45c,41773,1731095412053}]
2024-11-08T19:50:13,143 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-08T19:50:13,143 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-08T19:50:13,298 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-11-08T19:50:13,299 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37207, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-11-08T19:50:13,303 INFO [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-11-08T19:50:13,304 INFO [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-08T19:50:13,306 INFO [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=692b295ad45c%2C41773%2C1731095412053.meta, suffix=.meta, logDir=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/WALs/692b295ad45c,41773,1731095412053, archiveDir=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/oldWALs, maxLogs=32
2024-11-08T19:50:13,308 INFO [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 692b295ad45c%2C41773%2C1731095412053.meta.1731095413307.meta
2024-11-08T19:50:13,321 INFO [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/WALs/692b295ad45c,41773,1731095412053/692b295ad45c%2C41773%2C1731095412053.meta.1731095413307.meta
2024-11-08T19:50:13,323 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43819:43819),(127.0.0.1/127.0.0.1:45311:45311),(127.0.0.1/127.0.0.1:41401:41401)]
2024-11-08T19:50:13,323 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-11-08T19:50:13,324 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-11-08T19:50:13,324 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-11-08T19:50:13,324 INFO [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-11-08T19:50:13,324 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-11-08T19:50:13,324 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-08T19:50:13,324 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740
2024-11-08T19:50:13,324 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740
2024-11-08T19:50:13,326 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-08T19:50:13,327 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-08T19:50:13,328 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-08T19:50:13,328 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-08T19:50:13,328 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-11-08T19:50:13,329 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-11-08T19:50:13,329 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-08T19:50:13,330 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-08T19:50:13,330 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-11-08T19:50:13,331 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-11-08T19:50:13,331 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-08T19:50:13,331 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-08T19:50:13,331 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-11-08T19:50:13,332 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-11-08T19:50:13,332 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-08T19:50:13,333 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-08T19:50:13,333 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-11-08T19:50:13,334 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/hbase/meta/1588230740
2024-11-08T19:50:13,336 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/hbase/meta/1588230740
2024-11-08T19:50:13,337 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-11-08T19:50:13,337 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-11-08T19:50:13,338 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-11-08T19:50:13,340 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-11-08T19:50:13,341 INFO [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66216255, jitterRate=-0.013300910592079163}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-11-08T19:50:13,341 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740
2024-11-08T19:50:13,343 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731095413325Writing region info on filesystem at 1731095413325Initializing all the Stores at 1731095413326 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731095413326Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731095413326Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731095413326Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731095413326Cleaning up temporary data from old regions at 1731095413337 (+11 ms)Running coprocessor post-open hooks at 1731095413342 (+5 ms)Region opened successfully at 1731095413343 (+1 ms)
2024-11-08T19:50:13,345 INFO [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731095413297
2024-11-08T19:50:13,349 DEBUG [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740
2024-11-08T19:50:13,349 INFO [RS_OPEN_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740
2024-11-08T19:50:13,350 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=692b295ad45c,41773,1731095412053
2024-11-08T19:50:13,352 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 692b295ad45c,41773,1731095412053, state=OPEN
2024-11-08T19:50:13,363 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1011bffb7800001, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-11-08T19:50:13,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46325-0x1011bffb7800002, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-11-08T19:50:13,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41773-0x1011bffb7800003, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-11-08T19:50:13,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-11-08T19:50:13,363 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-08T19:50:13,363 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-08T19:50:13,363 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-08T19:50:13,363 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=692b295ad45c,41773,1731095412053
2024-11-08T19:50:13,363 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-08T19:50:13,368 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2
2024-11-08T19:50:13,368 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=692b295ad45c,41773,1731095412053 in 221 msec
2024-11-08T19:50:13,372 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1
2024-11-08T19:50:13,372 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 658 msec
2024-11-08T19:50:13,374 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-08T19:50:13,374 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces
2024-11-08T19:50:13,376 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-11-08T19:50:13,376 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=692b295ad45c,41773,1731095412053, seqNum=-1]
2024-11-08T19:50:13,376 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-08T19:50:13,378 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53463, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-08T19:50:13,388 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 755 msec
2024-11-08T19:50:13,388 INFO [master/692b295ad45c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731095413388, completionTime=-1
2024-11-08T19:50:13,388 INFO [master/692b295ad45c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running
2024-11-08T19:50:13,388 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster...
2024-11-08T19:50:13,391 INFO [master/692b295ad45c:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3
2024-11-08T19:50:13,391 INFO [master/692b295ad45c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731095473391
2024-11-08T19:50:13,391 INFO [master/692b295ad45c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731095533391
2024-11-08T19:50:13,391 INFO [master/692b295ad45c:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec
2024-11-08T19:50:13,392 INFO [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=692b295ad45c,36033,1731095411695-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-08T19:50:13,392 INFO [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=692b295ad45c,36033,1731095411695-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-08T19:50:13,392 INFO [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=692b295ad45c,36033,1731095411695-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-08T19:50:13,392 INFO [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-692b295ad45c:36033, period=300000, unit=MILLISECONDS is enabled.
2024-11-08T19:50:13,392 INFO [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-11-08T19:50:13,392 INFO [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled.
2024-11-08T19:50:13,395 DEBUG [master/692b295ad45c:0.Chore.1 {}] janitor.CatalogJanitor(180):
2024-11-08T19:50:13,398 INFO [master/692b295ad45c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.256sec
2024-11-08T19:50:13,398 INFO [master/692b295ad45c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-11-08T19:50:13,399 INFO [master/692b295ad45c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-11-08T19:50:13,399 INFO [master/692b295ad45c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-11-08T19:50:13,399 INFO [master/692b295ad45c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-11-08T19:50:13,399 INFO [master/692b295ad45c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-11-08T19:50:13,399 INFO [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=692b295ad45c,36033,1731095411695-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-08T19:50:13,399 INFO [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=692b295ad45c,36033,1731095411695-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-11-08T19:50:13,403 DEBUG [master/692b295ad45c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds
2024-11-08T19:50:13,403 INFO [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-11-08T19:50:13,403 INFO [master/692b295ad45c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=692b295ad45c,36033,1731095411695-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-08T19:50:13,494 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74e402b7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-08T19:50:13,494 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 692b295ad45c,36033,-1 for getting cluster id
2024-11-08T19:50:13,495 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-11-08T19:50:13,496 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '55bb90ae-6f88-4088-93c8-59fb9a0b3839'
2024-11-08T19:50:13,497 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-11-08T19:50:13,497 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "55bb90ae-6f88-4088-93c8-59fb9a0b3839"
2024-11-08T19:50:13,497 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7df0ee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-08T19:50:13,497 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [692b295ad45c,36033,-1]
2024-11-08T19:50:13,498 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false
2024-11-08T19:50:13,498 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-08T19:50:13,500 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41308, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService
2024-11-08T19:50:13,501 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77c67938, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-08T19:50:13,502 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-11-08T19:50:13,503 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=692b295ad45c,41773,1731095412053, seqNum=-1]
2024-11-08T19:50:13,504 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-08T19:50:13,505 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39574, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-08T19:50:13,508 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=692b295ad45c,36033,1731095411695
2024-11-08T19:50:13,509 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry
2024-11-08T19:50:13,510 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncConnectionImpl(321): The fetched master address is 692b295ad45c,36033,1731095411695
2024-11-08T19:50:13,510 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2075e3a6
2024-11-08T19:50:13,510 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-11-08T19:50:13,512 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41324, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-11-08T19:50:13,513 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-08T19:50:13,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC
2024-11-08T19:50:13,517 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION
2024-11-08T19:50:13,517 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-08T19:50:13,517 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4
2024-11-08T19:50:13,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-08T19:50:13,519 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-11-08T19:50:13,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45381 is added to blk_1073741837_1013 (size=392)
2024-11-08T19:50:13,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42907 is added to blk_1073741837_1013 (size=392)
2024-11-08T19:50:13,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37483 is added to blk_1073741837_1013 (size=392)
2024-11-08T19:50:13,533 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4f84cbe5d74462ea8db04f9699c9035a, NAME => 'TestHBaseWalOnEC,,1731095413512.4f84cbe5d74462ea8db04f9699c9035a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1
2024-11-08T19:50:13,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42907 is added to blk_1073741838_1014 (size=51)
2024-11-08T19:50:13,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37483 is added to blk_1073741838_1014 (size=51)
2024-11-08T19:50:13,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45381 is added to blk_1073741838_1014 (size=51)
2024-11-08T19:50:13,548 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731095413512.4f84cbe5d74462ea8db04f9699c9035a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-08T19:50:13,548 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 4f84cbe5d74462ea8db04f9699c9035a, disabling compactions & flushes
2024-11-08T19:50:13,548 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731095413512.4f84cbe5d74462ea8db04f9699c9035a.
2024-11-08T19:50:13,548 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731095413512.4f84cbe5d74462ea8db04f9699c9035a.
2024-11-08T19:50:13,548 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731095413512.4f84cbe5d74462ea8db04f9699c9035a. after waiting 0 ms
2024-11-08T19:50:13,548 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731095413512.4f84cbe5d74462ea8db04f9699c9035a.
2024-11-08T19:50:13,548 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731095413512.4f84cbe5d74462ea8db04f9699c9035a.
2024-11-08T19:50:13,548 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4f84cbe5d74462ea8db04f9699c9035a: Waiting for close lock at 1731095413548Disabling compacts and flushes for region at 1731095413548Disabling writes for close at 1731095413548Writing region close event to WAL at 1731095413548Closed at 1731095413548
2024-11-08T19:50:13,550 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META
2024-11-08T19:50:13,551 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731095413512.4f84cbe5d74462ea8db04f9699c9035a.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731095413550"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731095413550"}]},"ts":"1731095413550"}
2024-11-08T19:50:13,555 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta.
2024-11-08T19:50:13,557 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-11-08T19:50:13,557 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731095413557"}]},"ts":"1731095413557"}
2024-11-08T19:50:13,561 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta
2024-11-08T19:50:13,561 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {692b295ad45c=0} racks are {/default-rack=0}
2024-11-08T19:50:13,562 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions
2024-11-08T19:50:13,562 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions
2024-11-08T19:50:13,562 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions
2024-11-08T19:50:13,562 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0
2024-11-08T19:50:13,562 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0
2024-11-08T19:50:13,562 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0
2024-11-08T19:50:13,562 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0
2024-11-08T19:50:13,562 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0
2024-11-08T19:50:13,562 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0
2024-11-08T19:50:13,562 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1
2024-11-08T19:50:13,563 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4f84cbe5d74462ea8db04f9699c9035a, ASSIGN}]
2024-11-08T19:50:13,565 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4f84cbe5d74462ea8db04f9699c9035a, ASSIGN
2024-11-08T19:50:13,567 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4f84cbe5d74462ea8db04f9699c9035a, ASSIGN; state=OFFLINE, location=692b295ad45c,41507,1731095411907; forceNewPlan=false, retain=false
2024-11-08T19:50:13,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-08T19:50:13,717 INFO [692b295ad45c:36033 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment.
2024-11-08T19:50:13,717 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4f84cbe5d74462ea8db04f9699c9035a, regionState=OPENING, regionLocation=692b295ad45c,41507,1731095411907
2024-11-08T19:50:13,721 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4f84cbe5d74462ea8db04f9699c9035a, ASSIGN because future has completed
2024-11-08T19:50:13,722 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4f84cbe5d74462ea8db04f9699c9035a, server=692b295ad45c,41507,1731095411907}]
2024-11-08T19:50:13,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-08T19:50:13,876 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-11-08T19:50:13,878 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55837, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-11-08T19:50:13,882 INFO [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731095413512.4f84cbe5d74462ea8db04f9699c9035a.
2024-11-08T19:50:13,882 DEBUG [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 4f84cbe5d74462ea8db04f9699c9035a, NAME => 'TestHBaseWalOnEC,,1731095413512.4f84cbe5d74462ea8db04f9699c9035a.', STARTKEY => '', ENDKEY => ''}
2024-11-08T19:50:13,883 DEBUG [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 4f84cbe5d74462ea8db04f9699c9035a
2024-11-08T19:50:13,883 DEBUG [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731095413512.4f84cbe5d74462ea8db04f9699c9035a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-08T19:50:13,883 DEBUG [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 4f84cbe5d74462ea8db04f9699c9035a
2024-11-08T19:50:13,883 DEBUG [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 4f84cbe5d74462ea8db04f9699c9035a
2024-11-08T19:50:13,885 INFO [StoreOpener-4f84cbe5d74462ea8db04f9699c9035a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4f84cbe5d74462ea8db04f9699c9035a
2024-11-08T19:50:13,887 INFO [StoreOpener-4f84cbe5d74462ea8db04f9699c9035a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4f84cbe5d74462ea8db04f9699c9035a columnFamilyName cf
2024-11-08T19:50:13,887 DEBUG [StoreOpener-4f84cbe5d74462ea8db04f9699c9035a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-08T19:50:13,887 INFO [StoreOpener-4f84cbe5d74462ea8db04f9699c9035a-1 {}] regionserver.HStore(327): Store=4f84cbe5d74462ea8db04f9699c9035a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-08T19:50:13,888 DEBUG [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 4f84cbe5d74462ea8db04f9699c9035a
2024-11-08T19:50:13,889 DEBUG [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/default/TestHBaseWalOnEC/4f84cbe5d74462ea8db04f9699c9035a
2024-11-08T19:50:13,889 DEBUG [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/default/TestHBaseWalOnEC/4f84cbe5d74462ea8db04f9699c9035a
2024-11-08T19:50:13,890 DEBUG [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 4f84cbe5d74462ea8db04f9699c9035a
2024-11-08T19:50:13,890 DEBUG [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 4f84cbe5d74462ea8db04f9699c9035a
2024-11-08T19:50:13,892 DEBUG [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 4f84cbe5d74462ea8db04f9699c9035a
2024-11-08T19:50:13,895 DEBUG [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/default/TestHBaseWalOnEC/4f84cbe5d74462ea8db04f9699c9035a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-08T19:50:13,896 INFO [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 4f84cbe5d74462ea8db04f9699c9035a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60671410, jitterRate=-0.09592553973197937}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-11-08T19:50:13,896 DEBUG [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4f84cbe5d74462ea8db04f9699c9035a
2024-11-08T19:50:13,897 DEBUG [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 4f84cbe5d74462ea8db04f9699c9035a: Running coprocessor pre-open hook at 1731095413883Writing region info on filesystem at 1731095413883Initializing all the Stores at 1731095413884 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731095413885 (+1 ms)Cleaning up temporary data from old regions at 1731095413890 (+5 ms)Running coprocessor post-open hooks at 1731095413896 (+6 ms)Region opened successfully at 1731095413897 (+1 ms)
2024-11-08T19:50:13,899 INFO [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731095413512.4f84cbe5d74462ea8db04f9699c9035a., pid=6, masterSystemTime=1731095413876
2024-11-08T19:50:13,902 DEBUG [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731095413512.4f84cbe5d74462ea8db04f9699c9035a.
2024-11-08T19:50:13,902 INFO [RS_OPEN_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731095413512.4f84cbe5d74462ea8db04f9699c9035a.
2024-11-08T19:50:13,903 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4f84cbe5d74462ea8db04f9699c9035a, regionState=OPEN, openSeqNum=2, regionLocation=692b295ad45c,41507,1731095411907
2024-11-08T19:50:13,907 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4f84cbe5d74462ea8db04f9699c9035a, server=692b295ad45c,41507,1731095411907 because future has completed
2024-11-08T19:50:13,913 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5
2024-11-08T19:50:13,913 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 4f84cbe5d74462ea8db04f9699c9035a, server=692b295ad45c,41507,1731095411907 in 187 msec
2024-11-08T19:50:13,918 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4
2024-11-08T19:50:13,918 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4f84cbe5d74462ea8db04f9699c9035a, ASSIGN in 350 msec
2024-11-08T19:50:13,920 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-11-08T19:50:13,920 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731095413920"}]},"ts":"1731095413920"}
2024-11-08T19:50:13,924 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta
2024-11-08T19:50:13,925 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION
2024-11-08T19:50:13,929 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 412 msec
2024-11-08T19:50:14,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-08T19:50:14,146 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed
2024-11-08T19:50:14,146 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms
2024-11-08T19:50:14,146 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-08T19:50:14,150 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states.
2024-11-08T19:50:14,150 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-08T19:50:14,150 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned.
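[Editor's note, not part of the log: the entries above trace the master's CreateTableProcedure (pid=4) for 'TestHBaseWalOnEC' with a single column family 'cf', from the RPC received by HMaster through region assignment to "Operation: CREATE ... completed". A minimal client-side sketch that would trigger an equivalent procedure is shown below; the class name, the standalone main method, and the use of HBaseConfiguration.create() to reach the cluster are illustrative assumptions, since the test itself drives this through HBaseTestingUtil.]

    // Hypothetical sketch: client calls that would produce a CreateTableProcedure like pid=4 above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();  // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // One column family 'cf'; REGION_REPLICATION stays at its default of 1,
          // matching the table descriptor printed by HMaster$4 above.
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
              .build();
          admin.createTable(desc);  // returns once the create-table procedure has finished
        }
      }
    }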
2024-11-08T19:50:14,154 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731095413512.4f84cbe5d74462ea8db04f9699c9035a., hostname=692b295ad45c,41507,1731095411907, seqNum=2]
2024-11-08T19:50:14,154 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-08T19:50:14,157 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47742, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-08T19:50:14,161 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC
2024-11-08T19:50:14,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC
2024-11-08T19:50:14,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-08T19:50:14,164 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE
2024-11-08T19:50:14,171 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-08T19:50:14,171 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-08T19:50:14,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-08T19:50:14,327 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41507 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-11-08T19:50:14,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731095413512.4f84cbe5d74462ea8db04f9699c9035a.
2024-11-08T19:50:14,328 INFO [RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 4f84cbe5d74462ea8db04f9699c9035a 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-08T19:50:14,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/default/TestHBaseWalOnEC/4f84cbe5d74462ea8db04f9699c9035a/.tmp/cf/f3f84804663444439cfeab8ce3e44dc7 is 36, key is row/cf:cq/1731095414158/Put/seqid=0 2024-11-08T19:50:14,345 WARN [IPC Server handler 1 on default port 43011 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-08T19:50:14,346 WARN [IPC Server handler 1 on default port 43011 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-08T19:50:14,346 WARN [IPC Server handler 1 on default port 43011 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-08T19:50:14,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42907 is added to blk_1073741839_1015 (size=4787) 2024-11-08T19:50:14,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45381 is added to blk_1073741839_1015 (size=4787) 2024-11-08T19:50:14,351 INFO [RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/default/TestHBaseWalOnEC/4f84cbe5d74462ea8db04f9699c9035a/.tmp/cf/f3f84804663444439cfeab8ce3e44dc7 2024-11-08T19:50:14,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/default/TestHBaseWalOnEC/4f84cbe5d74462ea8db04f9699c9035a/.tmp/cf/f3f84804663444439cfeab8ce3e44dc7 as hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/default/TestHBaseWalOnEC/4f84cbe5d74462ea8db04f9699c9035a/cf/f3f84804663444439cfeab8ce3e44dc7 2024-11-08T19:50:14,367 INFO [RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/default/TestHBaseWalOnEC/4f84cbe5d74462ea8db04f9699c9035a/cf/f3f84804663444439cfeab8ce3e44dc7, entries=1, sequenceid=5, filesize=4.7 K 2024-11-08T19:50:14,369 INFO [RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 4f84cbe5d74462ea8db04f9699c9035a in 41ms, sequenceid=5, compaction requested=false 2024-11-08T19:50:14,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 4f84cbe5d74462ea8db04f9699c9035a: 2024-11-08T19:50:14,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731095413512.4f84cbe5d74462ea8db04f9699c9035a. 2024-11-08T19:50:14,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/692b295ad45c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-08T19:50:14,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-08T19:50:14,375 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-08T19:50:14,375 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 200 msec 2024-11-08T19:50:14,379 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 215 msec 2024-11-08T19:50:14,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-08T19:50:14,486 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-08T19:50:14,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-08T19:50:14,491 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
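The flush that just completed ("Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed") was requested through the admin API and executed on the master as FlushTableProcedure pid=7 with a per-region FlushRegionProcedure pid=8. As a hedged sketch only (the log shows the test going through RawAsyncHBaseAdmin; the Connection and class name here are assumptions), the equivalent synchronous call is:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public final class FlushTableSketch {
  // Hypothetical helper: ask the master to flush the test table. The client
  // then polls "Checking to see if procedure is done pid=7" until the
  // FlushTableProcedure finishes, as seen in the log above.
  static void flushTestTable(Connection connection) throws IOException {
    try (Admin admin = connection.getAdmin()) {
      admin.flush(TableName.valueOf("TestHBaseWalOnEC"));
    }
  }
}
```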
2024-11-08T19:50:14,491 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T19:50:14,491 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
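The call stack above shows the shutdown being driven from TestHBaseWalOnEC.tearDown via HBaseTestingUtil.shutdownMiniCluster. A minimal sketch of such a teardown follows; the field name, the @After annotation, and the wrapper class are assumptions (only the shutdownMiniCluster call is visible in the stack trace):

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public class MiniClusterTeardownSketch {
  // Assumed to be the same utility instance that started the mini cluster in setup.
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  // Shuts down the HBase mini cluster, which closes the shared async
  // connection and then stops the region servers and master, as logged below.
  @After
  public void tearDown() throws Exception {
    UTIL.shutdownMiniCluster();
  }
}
```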
2024-11-08T19:50:14,491 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T19:50:14,491 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-08T19:50:14,492 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-08T19:50:14,492 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2055269099, stopped=false 2024-11-08T19:50:14,492 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=692b295ad45c,36033,1731095411695 2024-11-08T19:50:14,563 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1011bffb7800001, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T19:50:14,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T19:50:14,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46325-0x1011bffb7800002, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T19:50:14,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41773-0x1011bffb7800003, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T19:50:14,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:14,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46325-0x1011bffb7800002, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:14,563 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1011bffb7800001, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:14,563 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-08T19:50:14,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41773-0x1011bffb7800003, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:14,563 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-08T19:50:14,563 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41507-0x1011bffb7800001, quorum=127.0.0.1:50222, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T19:50:14,563 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at 
org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T19:50:14,563 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46325-0x1011bffb7800002, quorum=127.0.0.1:50222, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T19:50:14,564 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41773-0x1011bffb7800003, quorum=127.0.0.1:50222, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T19:50:14,564 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T19:50:14,564 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '692b295ad45c,41507,1731095411907' ***** 2024-11-08T19:50:14,564 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-08T19:50:14,564 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '692b295ad45c,46325,1731095411998' ***** 2024-11-08T19:50:14,564 INFO [RS:0;692b295ad45c:41507 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-08T19:50:14,564 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-08T19:50:14,564 INFO [RS:0;692b295ad45c:41507 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-08T19:50:14,564 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-08T19:50:14,564 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '692b295ad45c,41773,1731095412053' ***** 2024-11-08T19:50:14,564 INFO [RS:0;692b295ad45c:41507 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-08T19:50:14,564 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-08T19:50:14,564 INFO [RS:0;692b295ad45c:41507 {}] regionserver.HRegionServer(3091): Received CLOSE for 4f84cbe5d74462ea8db04f9699c9035a 2024-11-08T19:50:14,564 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T19:50:14,564 INFO [RS:1;692b295ad45c:46325 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-08T19:50:14,565 INFO [RS:1;692b295ad45c:46325 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-08T19:50:14,565 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-08T19:50:14,565 INFO [RS:1;692b295ad45c:46325 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
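The ZooKeeper traffic above (NodeDeleted on /hbase/running, followed by each ZKWatcher re-setting a watch on the now-missing znode) is how cluster shutdown is broadcast to the servers. A bare ZooKeeper client equivalent, for illustration only (the session timeout and class are assumptions; the quorum string is taken from the log):

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public final class RunningZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:50222", 30_000, event -> { });
    Watcher watcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && "/hbase/running".equals(event.getPath())) {
        System.out.println("cluster shutdown requested: " + event);
      }
    };
    // exists() registers the watch even when the znode is absent, which is the
    // "Set watcher on znode that does not yet exist, /hbase/running" pattern above.
    zk.exists("/hbase/running", watcher);
  }
}
```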
2024-11-08T19:50:14,565 INFO [RS:1;692b295ad45c:46325 {}] regionserver.HRegionServer(959): stopping server 692b295ad45c,46325,1731095411998 2024-11-08T19:50:14,565 INFO [RS:1;692b295ad45c:46325 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T19:50:14,565 INFO [RS:0;692b295ad45c:41507 {}] regionserver.HRegionServer(959): stopping server 692b295ad45c,41507,1731095411907 2024-11-08T19:50:14,565 INFO [RS:1;692b295ad45c:46325 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;692b295ad45c:46325. 2024-11-08T19:50:14,565 INFO [RS:2;692b295ad45c:41773 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-08T19:50:14,565 INFO [RS:0;692b295ad45c:41507 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T19:50:14,565 INFO [RS:2;692b295ad45c:41773 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-08T19:50:14,565 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-08T19:50:14,565 INFO [RS:0;692b295ad45c:41507 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;692b295ad45c:41507. 2024-11-08T19:50:14,565 DEBUG [RS:1;692b295ad45c:46325 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T19:50:14,565 INFO [RS:2;692b295ad45c:41773 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-08T19:50:14,565 DEBUG [RS:1;692b295ad45c:46325 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T19:50:14,565 INFO [RS:2;692b295ad45c:41773 {}] regionserver.HRegionServer(959): stopping server 692b295ad45c,41773,1731095412053 2024-11-08T19:50:14,565 DEBUG [RS:0;692b295ad45c:41507 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T19:50:14,565 INFO [RS:2;692b295ad45c:41773 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T19:50:14,565 DEBUG [RS:0;692b295ad45c:41507 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T19:50:14,565 INFO [RS:1;692b295ad45c:46325 {}] regionserver.HRegionServer(976): stopping server 692b295ad45c,46325,1731095411998; all regions closed. 2024-11-08T19:50:14,565 INFO [RS:2;692b295ad45c:41773 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;692b295ad45c:41773. 
2024-11-08T19:50:14,565 INFO [RS:0;692b295ad45c:41507 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-08T19:50:14,565 DEBUG [RS:0;692b295ad45c:41507 {}] regionserver.HRegionServer(1325): Online Regions={4f84cbe5d74462ea8db04f9699c9035a=TestHBaseWalOnEC,,1731095413512.4f84cbe5d74462ea8db04f9699c9035a.} 2024-11-08T19:50:14,565 DEBUG [RS:2;692b295ad45c:41773 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T19:50:14,565 DEBUG [RS:2;692b295ad45c:41773 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T19:50:14,565 DEBUG [RS:0;692b295ad45c:41507 {}] regionserver.HRegionServer(1351): Waiting on 4f84cbe5d74462ea8db04f9699c9035a 2024-11-08T19:50:14,566 INFO [RS:2;692b295ad45c:41773 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-08T19:50:14,566 INFO [RS:2;692b295ad45c:41773 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-08T19:50:14,566 INFO [RS:2;692b295ad45c:41773 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-08T19:50:14,566 DEBUG [RS_CLOSE_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4f84cbe5d74462ea8db04f9699c9035a, disabling compactions & flushes 2024-11-08T19:50:14,566 INFO [RS:2;692b295ad45c:41773 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-08T19:50:14,566 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T19:50:14,566 INFO [RS:2;692b295ad45c:41773 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-08T19:50:14,566 DEBUG [RS:2;692b295ad45c:41773 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-08T19:50:14,566 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T19:50:14,566 DEBUG [RS:2;692b295ad45c:41773 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-08T19:50:14,566 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T19:50:14,566 INFO [RS_CLOSE_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731095413512.4f84cbe5d74462ea8db04f9699c9035a. 2024-11-08T19:50:14,566 DEBUG [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-08T19:50:14,566 DEBUG [RS_CLOSE_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731095413512.4f84cbe5d74462ea8db04f9699c9035a. 2024-11-08T19:50:14,566 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T19:50:14,566 INFO [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-08T19:50:14,566 DEBUG [RS_CLOSE_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731095413512.4f84cbe5d74462ea8db04f9699c9035a. after waiting 0 ms 2024-11-08T19:50:14,566 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T19:50:14,567 DEBUG [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-08T19:50:14,567 DEBUG [RS_CLOSE_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731095413512.4f84cbe5d74462ea8db04f9699c9035a. 
2024-11-08T19:50:14,567 DEBUG [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-08T19:50:14,567 DEBUG [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-08T19:50:14,567 INFO [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-08T19:50:14,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45381 is added to blk_1073741833_1009 (size=93) 2024-11-08T19:50:14,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42907 is added to blk_1073741833_1009 (size=93) 2024-11-08T19:50:14,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37483 is added to blk_1073741833_1009 (size=93) 2024-11-08T19:50:14,572 DEBUG [RS_CLOSE_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/default/TestHBaseWalOnEC/4f84cbe5d74462ea8db04f9699c9035a/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-08T19:50:14,574 INFO [RS_CLOSE_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731095413512.4f84cbe5d74462ea8db04f9699c9035a. 2024-11-08T19:50:14,574 DEBUG [RS_CLOSE_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4f84cbe5d74462ea8db04f9699c9035a: Waiting for close lock at 1731095414566Running coprocessor pre-close hooks at 1731095414566Disabling compacts and flushes for region at 1731095414566Disabling writes for close at 1731095414567 (+1 ms)Writing region close event to WAL at 1731095414568 (+1 ms)Running coprocessor post-close hooks at 1731095414573 (+5 ms)Closed at 1731095414574 (+1 ms) 2024-11-08T19:50:14,574 DEBUG [RS_CLOSE_REGION-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731095413512.4f84cbe5d74462ea8db04f9699c9035a. 
2024-11-08T19:50:14,575 DEBUG [RS:1;692b295ad45c:46325 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/oldWALs 2024-11-08T19:50:14,575 INFO [RS:1;692b295ad45c:46325 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 692b295ad45c%2C46325%2C1731095411998:(num 1731095412931) 2024-11-08T19:50:14,575 DEBUG [RS:1;692b295ad45c:46325 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T19:50:14,575 INFO [RS:1;692b295ad45c:46325 {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T19:50:14,575 INFO [RS:1;692b295ad45c:46325 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T19:50:14,575 INFO [RS:1;692b295ad45c:46325 {}] hbase.ChoreService(370): Chore service for: regionserver/692b295ad45c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-08T19:50:14,576 INFO [RS:1;692b295ad45c:46325 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-08T19:50:14,576 INFO [regionserver/692b295ad45c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-08T19:50:14,576 INFO [RS:1;692b295ad45c:46325 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-08T19:50:14,576 INFO [RS:1;692b295ad45c:46325 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-08T19:50:14,576 INFO [RS:1;692b295ad45c:46325 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T19:50:14,576 INFO [RS:1;692b295ad45c:46325 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46325 2024-11-08T19:50:14,576 INFO [regionserver/692b295ad45c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T19:50:14,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46325-0x1011bffb7800002, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/692b295ad45c,46325,1731095411998 2024-11-08T19:50:14,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T19:50:14,583 INFO [RS:1;692b295ad45c:46325 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T19:50:14,589 DEBUG [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/hbase/meta/1588230740/.tmp/info/8dc8726b5a614aaeaed0c372e4c41efe is 153, key is TestHBaseWalOnEC,,1731095413512.4f84cbe5d74462ea8db04f9699c9035a./info:regioninfo/1731095413903/Put/seqid=0 2024-11-08T19:50:14,594 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [692b295ad45c,46325,1731095411998] 2024-11-08T19:50:14,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45381 is added to blk_1073741840_1016 (size=6637) 2024-11-08T19:50:14,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42907 is added to blk_1073741840_1016 (size=6637) 
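As each server stops, its FSHLog WAL is closed and the last WAL file is moved to oldWALs, as shown just above. The point of TestHBaseWalOnEC is to run these WALs on an erasure-coded directory; purely as an illustration of the HDFS side (the policy name, directory, and helper are assumptions, not the test's actual setup code), applying an EC policy looks roughly like:

```java
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class ErasureCodingSetupSketch {
  // Hypothetical: put a directory (e.g. the WAL root) under an erasure coding
  // policy. The policy must be enabled on the NameNode before it can be set.
  static void applyEcPolicy(DistributedFileSystem dfs, Path dir) throws IOException {
    dfs.mkdirs(dir);
    dfs.enableErasureCodingPolicy("XOR-2-1-1024k"); // example built-in policy
    dfs.setErasureCodingPolicy(dir, "XOR-2-1-1024k");
  }
}
```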
2024-11-08T19:50:14,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37483 is added to blk_1073741840_1016 (size=6637) 2024-11-08T19:50:14,598 INFO [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/hbase/meta/1588230740/.tmp/info/8dc8726b5a614aaeaed0c372e4c41efe 2024-11-08T19:50:14,604 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/692b295ad45c,46325,1731095411998 already deleted, retry=false 2024-11-08T19:50:14,605 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 692b295ad45c,46325,1731095411998 expired; onlineServers=2 2024-11-08T19:50:14,625 DEBUG [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/hbase/meta/1588230740/.tmp/ns/04f5e65b22cd4ba2aaca42d1947f3127 is 43, key is default/ns:d/1731095413379/Put/seqid=0 2024-11-08T19:50:14,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45381 is added to blk_1073741841_1017 (size=5153) 2024-11-08T19:50:14,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42907 is added to blk_1073741841_1017 (size=5153) 2024-11-08T19:50:14,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37483 is added to blk_1073741841_1017 (size=5153) 2024-11-08T19:50:14,634 INFO [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/hbase/meta/1588230740/.tmp/ns/04f5e65b22cd4ba2aaca42d1947f3127 2024-11-08T19:50:14,640 INFO [regionserver/692b295ad45c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T19:50:14,641 INFO [regionserver/692b295ad45c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T19:50:14,657 DEBUG [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/hbase/meta/1588230740/.tmp/table/df78cf9f62d249e48bec9732f1244f53 is 52, key is TestHBaseWalOnEC/table:state/1731095413920/Put/seqid=0 2024-11-08T19:50:14,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45381 is added to blk_1073741842_1018 (size=5249) 2024-11-08T19:50:14,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37483 is added to blk_1073741842_1018 (size=5249) 2024-11-08T19:50:14,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42907 is added to blk_1073741842_1018 (size=5249) 2024-11-08T19:50:14,666 INFO [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/hbase/meta/1588230740/.tmp/table/df78cf9f62d249e48bec9732f1244f53 2024-11-08T19:50:14,675 DEBUG [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/hbase/meta/1588230740/.tmp/info/8dc8726b5a614aaeaed0c372e4c41efe as hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/hbase/meta/1588230740/info/8dc8726b5a614aaeaed0c372e4c41efe 2024-11-08T19:50:14,683 INFO [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/hbase/meta/1588230740/info/8dc8726b5a614aaeaed0c372e4c41efe, entries=10, sequenceid=11, filesize=6.5 K 2024-11-08T19:50:14,685 DEBUG [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/hbase/meta/1588230740/.tmp/ns/04f5e65b22cd4ba2aaca42d1947f3127 as hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/hbase/meta/1588230740/ns/04f5e65b22cd4ba2aaca42d1947f3127 2024-11-08T19:50:14,694 INFO [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/hbase/meta/1588230740/ns/04f5e65b22cd4ba2aaca42d1947f3127, entries=2, sequenceid=11, filesize=5.0 K 2024-11-08T19:50:14,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46325-0x1011bffb7800002, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T19:50:14,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46325-0x1011bffb7800002, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T19:50:14,694 INFO [RS:1;692b295ad45c:46325 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T19:50:14,694 INFO [RS:1;692b295ad45c:46325 {}] regionserver.HRegionServer(1031): Exiting; stopping=692b295ad45c,46325,1731095411998; zookeeper connection closed. 
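The meta flush in progress above persists the info and ns families (and, just below, the table family) of hbase:meta before that region closes. If one wanted to inspect those rows from a client while the cluster is still up, a scan over hbase:meta would look like the following sketch (the Connection and wrapper class are assumptions):

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class MetaScanSketch {
  // Illustrative only: list hbase:meta row keys from the 'info' and 'table'
  // families that the flush above has just written to store files.
  static void dumpMeta(Connection connection) throws IOException {
    Scan scan = new Scan().addFamily(Bytes.toBytes("info")).addFamily(Bytes.toBytes("table"));
    try (Table meta = connection.getTable(TableName.META_TABLE_NAME);
         ResultScanner scanner = meta.getScanner(scan)) {
      for (Result r : scanner) {
        System.out.println(Bytes.toStringBinary(r.getRow()));
      }
    }
  }
}
```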
2024-11-08T19:50:14,695 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5b1914b2 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5b1914b2 2024-11-08T19:50:14,696 DEBUG [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/hbase/meta/1588230740/.tmp/table/df78cf9f62d249e48bec9732f1244f53 as hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/hbase/meta/1588230740/table/df78cf9f62d249e48bec9732f1244f53 2024-11-08T19:50:14,706 INFO [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/hbase/meta/1588230740/table/df78cf9f62d249e48bec9732f1244f53, entries=2, sequenceid=11, filesize=5.1 K 2024-11-08T19:50:14,708 INFO [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 140ms, sequenceid=11, compaction requested=false 2024-11-08T19:50:14,716 DEBUG [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-08T19:50:14,716 DEBUG [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-08T19:50:14,717 INFO [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-08T19:50:14,717 DEBUG [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731095414566Running coprocessor pre-close hooks at 1731095414566Disabling compacts and flushes for region at 1731095414566Disabling writes for close at 1731095414567 (+1 ms)Obtaining lock to block concurrent updates at 1731095414567Preparing flush snapshotting stores in 1588230740 at 1731095414567Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731095414567Flushing stores of hbase:meta,,1.1588230740 at 1731095414568 (+1 ms)Flushing 1588230740/info: creating writer at 1731095414568Flushing 1588230740/info: appending metadata at 1731095414589 (+21 ms)Flushing 1588230740/info: closing flushed file at 1731095414589Flushing 1588230740/ns: creating writer at 1731095414607 (+18 ms)Flushing 1588230740/ns: appending metadata at 1731095414625 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1731095414625Flushing 1588230740/table: creating writer at 1731095414642 (+17 ms)Flushing 1588230740/table: appending metadata at 1731095414656 (+14 ms)Flushing 1588230740/table: closing flushed file at 1731095414656Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@79839bd6: reopening flushed file at 1731095414674 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@71cb0ab: reopening 
flushed file at 1731095414684 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2843d97c: reopening flushed file at 1731095414694 (+10 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 140ms, sequenceid=11, compaction requested=false at 1731095414708 (+14 ms)Writing region close event to WAL at 1731095414709 (+1 ms)Running coprocessor post-close hooks at 1731095414716 (+7 ms)Closed at 1731095414717 (+1 ms) 2024-11-08T19:50:14,717 DEBUG [RS_CLOSE_META-regionserver/692b295ad45c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-08T19:50:14,766 INFO [RS:0;692b295ad45c:41507 {}] regionserver.HRegionServer(976): stopping server 692b295ad45c,41507,1731095411907; all regions closed. 2024-11-08T19:50:14,766 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T19:50:14,766 INFO [RS:2;692b295ad45c:41773 {}] regionserver.HRegionServer(976): stopping server 692b295ad45c,41773,1731095412053; all regions closed. 2024-11-08T19:50:14,766 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T19:50:14,767 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T19:50:14,767 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T19:50:14,767 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T19:50:14,767 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T19:50:14,767 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T19:50:14,767 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T19:50:14,767 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T19:50:14,767 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T19:50:14,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37483 is added to blk_1073741834_1010 (size=1298) 2024-11-08T19:50:14,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42907 is added to blk_1073741834_1010 (size=1298) 2024-11-08T19:50:14,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45381 is added to blk_1073741834_1010 (size=1298) 2024-11-08T19:50:14,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37483 is added to blk_1073741836_1012 (size=2751) 2024-11-08T19:50:14,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42907 is added to blk_1073741836_1012 (size=2751) 2024-11-08T19:50:14,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45381 is added to blk_1073741836_1012 (size=2751) 2024-11-08T19:50:14,776 DEBUG [RS:2;692b295ad45c:41773 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/oldWALs 2024-11-08T19:50:14,776 DEBUG [RS:0;692b295ad45c:41507 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/oldWALs 2024-11-08T19:50:14,777 INFO [RS:2;692b295ad45c:41773 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 692b295ad45c%2C41773%2C1731095412053.meta:.meta(num 1731095413307) 2024-11-08T19:50:14,777 INFO [RS:0;692b295ad45c:41507 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 692b295ad45c%2C41507%2C1731095411907:(num 1731095412970) 2024-11-08T19:50:14,777 
DEBUG [RS:0;692b295ad45c:41507 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T19:50:14,777 INFO [RS:0;692b295ad45c:41507 {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T19:50:14,777 INFO [RS:0;692b295ad45c:41507 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T19:50:14,777 INFO [RS:0;692b295ad45c:41507 {}] hbase.ChoreService(370): Chore service for: regionserver/692b295ad45c:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-08T19:50:14,777 INFO [RS:0;692b295ad45c:41507 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-08T19:50:14,777 INFO [RS:0;692b295ad45c:41507 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-08T19:50:14,777 INFO [RS:0;692b295ad45c:41507 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-08T19:50:14,778 INFO [RS:0;692b295ad45c:41507 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T19:50:14,778 INFO [RS:0;692b295ad45c:41507 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41507 2024-11-08T19:50:14,778 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T19:50:14,778 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T19:50:14,778 INFO [regionserver/692b295ad45c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-08T19:50:14,778 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T19:50:14,779 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T19:50:14,779 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T19:50:14,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42907 is added to blk_1073741835_1011 (size=93) 2024-11-08T19:50:14,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37483 is added to blk_1073741835_1011 (size=93) 2024-11-08T19:50:14,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45381 is added to blk_1073741835_1011 (size=93) 2024-11-08T19:50:14,790 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1011bffb7800001, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/692b295ad45c,41507,1731095411907 2024-11-08T19:50:14,790 INFO [RS:0;692b295ad45c:41507 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T19:50:14,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T19:50:14,802 DEBUG [RS:2;692b295ad45c:41773 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/oldWALs 2024-11-08T19:50:14,802 INFO [RS:2;692b295ad45c:41773 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 692b295ad45c%2C41773%2C1731095412053:(num 1731095412985) 2024-11-08T19:50:14,802 DEBUG [RS:2;692b295ad45c:41773 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T19:50:14,802 INFO [RS:2;692b295ad45c:41773 {}] regionserver.LeaseManager(133): 
Closed leases 2024-11-08T19:50:14,802 INFO [RS:2;692b295ad45c:41773 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T19:50:14,802 INFO [RS:2;692b295ad45c:41773 {}] hbase.ChoreService(370): Chore service for: regionserver/692b295ad45c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-08T19:50:14,803 INFO [RS:2;692b295ad45c:41773 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T19:50:14,803 INFO [regionserver/692b295ad45c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-08T19:50:14,803 INFO [RS:2;692b295ad45c:41773 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41773 2024-11-08T19:50:14,805 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [692b295ad45c,41507,1731095411907] 2024-11-08T19:50:14,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41773-0x1011bffb7800003, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/692b295ad45c,41773,1731095412053 2024-11-08T19:50:14,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T19:50:14,815 INFO [RS:2;692b295ad45c:41773 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T19:50:14,816 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$370/0x00007fc8d08f78d8@1aca9583 rejected from java.util.concurrent.ThreadPoolExecutor@5fc818be[Shutting down, pool size = 1, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1360) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] 
at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-11-08T19:50:14,826 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/692b295ad45c,41507,1731095411907 already deleted, retry=false 2024-11-08T19:50:14,826 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 692b295ad45c,41507,1731095411907 expired; onlineServers=1 2024-11-08T19:50:14,836 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [692b295ad45c,41773,1731095412053] 2024-11-08T19:50:14,847 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/692b295ad45c,41773,1731095412053 already deleted, retry=false 2024-11-08T19:50:14,847 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 692b295ad45c,41773,1731095412053 expired; onlineServers=0 2024-11-08T19:50:14,847 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '692b295ad45c,36033,1731095411695' ***** 2024-11-08T19:50:14,847 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-08T19:50:14,847 INFO [M:0;692b295ad45c:36033 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T19:50:14,847 INFO [M:0;692b295ad45c:36033 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T19:50:14,848 DEBUG [M:0;692b295ad45c:36033 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-08T19:50:14,848 DEBUG [M:0;692b295ad45c:36033 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-08T19:50:14,848 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-08T19:50:14,848 DEBUG [master/692b295ad45c:0:becomeActiveMaster-HFileCleaner.small.0-1731095412647 {}] cleaner.HFileCleaner(306): Exit Thread[master/692b295ad45c:0:becomeActiveMaster-HFileCleaner.small.0-1731095412647,5,FailOnTimeoutGroup] 2024-11-08T19:50:14,848 INFO [M:0;692b295ad45c:36033 {}] hbase.ChoreService(370): Chore service for: master/692b295ad45c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-08T19:50:14,848 INFO [M:0;692b295ad45c:36033 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T19:50:14,848 DEBUG [M:0;692b295ad45c:36033 {}] master.HMaster(1795): Stopping service threads 2024-11-08T19:50:14,848 INFO [M:0;692b295ad45c:36033 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-08T19:50:14,848 DEBUG [master/692b295ad45c:0:becomeActiveMaster-HFileCleaner.large.0-1731095412647 {}] cleaner.HFileCleaner(306): Exit Thread[master/692b295ad45c:0:becomeActiveMaster-HFileCleaner.large.0-1731095412647,5,FailOnTimeoutGroup] 2024-11-08T19:50:14,849 INFO [M:0;692b295ad45c:36033 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-08T19:50:14,849 INFO [M:0;692b295ad45c:36033 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-08T19:50:14,850 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
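The ERROR above is a benign shutdown race: the ZKWatcher's event executor has already been stopped, so a late ZooKeeper event is rejected by the pool's AbortPolicy. The mechanism is plain java.util.concurrent behaviour, reproduced here outside HBase (names are illustrative):

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;

public final class RejectedAfterShutdownSketch {
  public static void main(String[] args) {
    ExecutorService pool = Executors.newSingleThreadExecutor();
    pool.shutdown(); // comparable to the watcher's executor being shut down first
    try {
      pool.execute(() -> System.out.println("late ZooKeeper event"));
    } catch (RejectedExecutionException e) {
      // Same exception type as "Error while calling watcher" in the log.
      System.out.println("rejected: " + e);
    }
  }
}
```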
2024-11-08T19:50:14,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-08T19:50:14,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T19:50:14,876 DEBUG [M:0;692b295ad45c:36033 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-08T19:50:14,877 DEBUG [M:0;692b295ad45c:36033 {}] master.ActiveMasterManager(353): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-08T19:50:14,878 INFO [M:0;692b295ad45c:36033 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/.lastflushedseqids 2024-11-08T19:50:14,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37483 is added to blk_1073741843_1019 (size=127) 2024-11-08T19:50:14,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42907 is added to blk_1073741843_1019 (size=127) 2024-11-08T19:50:14,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45381 is added to blk_1073741843_1019 (size=127) 2024-11-08T19:50:14,905 INFO [M:0;692b295ad45c:36033 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-08T19:50:14,905 INFO [M:0;692b295ad45c:36033 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-08T19:50:14,906 DEBUG [M:0;692b295ad45c:36033 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-08T19:50:14,906 INFO [M:0;692b295ad45c:36033 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T19:50:14,906 DEBUG [M:0;692b295ad45c:36033 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T19:50:14,906 DEBUG [M:0;692b295ad45c:36033 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-08T19:50:14,906 DEBUG [M:0;692b295ad45c:36033 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
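Several of the shutdown lines above mention chores being cancelled (the master's FlushedSequenceIdFlusher, the region servers' CompactionThroughputTuner and replication statistics chores). These are ScheduledChore instances run by a per-server ChoreService; the sketch below shows the general pattern from memory, so treat the exact constructor signatures as assumptions:

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public final class ChoreServiceSketch {
  public static void main(String[] args) {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("sketch");
    // Periodic task, analogous to the chores listed in the shutdown messages.
    ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 60_000) {
      @Override protected void chore() {
        // periodic work would run here
      }
    };
    service.scheduleChore(chore);
    service.shutdown(); // what "Shutdown chores and chore service" amounts to
  }
}
```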
2024-11-08T19:50:14,906 INFO [M:0;692b295ad45c:36033 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-11-08T19:50:14,908 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1011bffb7800001, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T19:50:14,908 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1011bffb7800001, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T19:50:14,908 INFO [RS:0;692b295ad45c:41507 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T19:50:14,908 INFO [RS:0;692b295ad45c:41507 {}] regionserver.HRegionServer(1031): Exiting; stopping=692b295ad45c,41507,1731095411907; zookeeper connection closed. 2024-11-08T19:50:14,909 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4a428eba {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4a428eba 2024-11-08T19:50:14,932 DEBUG [M:0;692b295ad45c:36033 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e7d71f59686145b78ce88302acc2cb3e is 82, key is hbase:meta,,1/info:regioninfo/1731095413350/Put/seqid=0 2024-11-08T19:50:14,936 INFO [RS:2;692b295ad45c:41773 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T19:50:14,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41773-0x1011bffb7800003, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T19:50:14,937 INFO [RS:2;692b295ad45c:41773 {}] regionserver.HRegionServer(1031): Exiting; stopping=692b295ad45c,41773,1731095412053; zookeeper connection closed. 
2024-11-08T19:50:14,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41773-0x1011bffb7800003, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T19:50:14,941 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7c62b598 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7c62b598 2024-11-08T19:50:14,941 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-08T19:50:14,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45381 is added to blk_1073741844_1020 (size=5672) 2024-11-08T19:50:14,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37483 is added to blk_1073741844_1020 (size=5672) 2024-11-08T19:50:14,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42907 is added to blk_1073741844_1020 (size=5672) 2024-11-08T19:50:14,954 INFO [M:0;692b295ad45c:36033 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e7d71f59686145b78ce88302acc2cb3e 2024-11-08T19:50:14,985 DEBUG [M:0;692b295ad45c:36033 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e5fceccd106440a9aefce9077d73a243 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731095413928/Put/seqid=0 2024-11-08T19:50:15,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37483 is added to blk_1073741845_1021 (size=6440) 2024-11-08T19:50:15,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45381 is added to blk_1073741845_1021 (size=6440) 2024-11-08T19:50:15,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42907 is added to blk_1073741845_1021 (size=6440) 2024-11-08T19:50:15,014 INFO [M:0;692b295ad45c:36033 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.15 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e5fceccd106440a9aefce9077d73a243 2024-11-08T19:50:15,040 DEBUG [M:0;692b295ad45c:36033 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1e1b4c0099c94bedbb78dd1f2e3c343a is 69, key is 692b295ad45c,41507,1731095411907/rs:state/1731095412765/Put/seqid=0 2024-11-08T19:50:15,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45381 is added to blk_1073741846_1022 (size=5294) 2024-11-08T19:50:15,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42907 is added to blk_1073741846_1022 (size=5294) 2024-11-08T19:50:15,051 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37483 is added to blk_1073741846_1022 (size=5294) 2024-11-08T19:50:15,052 INFO [M:0;692b295ad45c:36033 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1e1b4c0099c94bedbb78dd1f2e3c343a 2024-11-08T19:50:15,059 DEBUG [M:0;692b295ad45c:36033 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e7d71f59686145b78ce88302acc2cb3e as hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e7d71f59686145b78ce88302acc2cb3e 2024-11-08T19:50:15,067 INFO [M:0;692b295ad45c:36033 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e7d71f59686145b78ce88302acc2cb3e, entries=8, sequenceid=72, filesize=5.5 K 2024-11-08T19:50:15,073 DEBUG [M:0;692b295ad45c:36033 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e5fceccd106440a9aefce9077d73a243 as hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e5fceccd106440a9aefce9077d73a243 2024-11-08T19:50:15,081 INFO [M:0;692b295ad45c:36033 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e5fceccd106440a9aefce9077d73a243, entries=8, sequenceid=72, filesize=6.3 K 2024-11-08T19:50:15,082 DEBUG [M:0;692b295ad45c:36033 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1e1b4c0099c94bedbb78dd1f2e3c343a as hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1e1b4c0099c94bedbb78dd1f2e3c343a 2024-11-08T19:50:15,091 INFO [M:0;692b295ad45c:36033 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43011/user/jenkins/test-data/fefe608c-e03b-dc10-eef6-a960dea4e0f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1e1b4c0099c94bedbb78dd1f2e3c343a, entries=3, sequenceid=72, filesize=5.2 K 2024-11-08T19:50:15,093 INFO [M:0;692b295ad45c:36033 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 187ms, sequenceid=72, compaction requested=false 2024-11-08T19:50:15,096 INFO [M:0;692b295ad45c:36033 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-08T19:50:15,097 DEBUG [M:0;692b295ad45c:36033 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731095414906Disabling compacts and flushes for region at 1731095414906Disabling writes for close at 1731095414906Obtaining lock to block concurrent updates at 1731095414906Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731095414906Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27480, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1731095414907 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731095414908 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731095414908Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731095414931 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731095414932 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731095414963 (+31 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731095414984 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731095414984Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731095415023 (+39 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731095415039 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731095415039Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@101549d3: reopening flushed file at 1731095415058 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@609aa3d3: reopening flushed file at 1731095415067 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4aeae18e: reopening flushed file at 1731095415081 (+14 ms)Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 187ms, sequenceid=72, compaction requested=false at 1731095415093 (+12 ms)Writing region close event to WAL at 1731095415096 (+3 ms)Closed at 1731095415096 2024-11-08T19:50:15,098 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T19:50:15,098 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T19:50:15,098 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T19:50:15,098 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T19:50:15,098 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T19:50:15,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37483 is added to blk_1073741830_1006 (size=32683) 2024-11-08T19:50:15,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45381 is added to blk_1073741830_1006 (size=32683) 2024-11-08T19:50:15,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42907 is added to blk_1073741830_1006 (size=32683) 2024-11-08T19:50:15,104 INFO [M:0;692b295ad45c:36033 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-08T19:50:15,104 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-08T19:50:15,104 INFO [M:0;692b295ad45c:36033 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36033 2024-11-08T19:50:15,105 INFO [M:0;692b295ad45c:36033 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T19:50:15,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T19:50:15,216 INFO [M:0;692b295ad45c:36033 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T19:50:15,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36033-0x1011bffb7800000, quorum=127.0.0.1:50222, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T19:50:15,221 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@165796ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T19:50:15,222 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@176751e3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T19:50:15,222 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T19:50:15,222 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10a2b6be{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T19:50:15,222 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60d8940e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/hadoop.log.dir/,STOPPED} 2024-11-08T19:50:15,224 WARN [BP-161215812-172.17.0.2-1731095408818 heartbeating to localhost/127.0.0.1:43011 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T19:50:15,224 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-08T19:50:15,224 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T19:50:15,224 WARN [BP-161215812-172.17.0.2-1731095408818 heartbeating to localhost/127.0.0.1:43011 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-161215812-172.17.0.2-1731095408818 (Datanode Uuid f730e582-a07e-4f3e-8b19-2ca87460b617) service to localhost/127.0.0.1:43011 2024-11-08T19:50:15,225 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/cluster_42cdef85-3bd7-8176-735f-90bff14955ac/data/data5/current/BP-161215812-172.17.0.2-1731095408818 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T19:50:15,226 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/cluster_42cdef85-3bd7-8176-735f-90bff14955ac/data/data6/current/BP-161215812-172.17.0.2-1731095408818 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T19:50:15,226 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T19:50:15,234 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3c49e2f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T19:50:15,234 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@74a88c50{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T19:50:15,234 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T19:50:15,235 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6ba59100{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T19:50:15,235 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f5c60f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/hadoop.log.dir/,STOPPED} 2024-11-08T19:50:15,237 WARN [BP-161215812-172.17.0.2-1731095408818 heartbeating to localhost/127.0.0.1:43011 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T19:50:15,237 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-08T19:50:15,237 WARN [BP-161215812-172.17.0.2-1731095408818 heartbeating to localhost/127.0.0.1:43011 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-161215812-172.17.0.2-1731095408818 (Datanode Uuid 92614bba-ef2d-4b1d-a465-2ae644e7fb31) service to localhost/127.0.0.1:43011 2024-11-08T19:50:15,237 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T19:50:15,238 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/cluster_42cdef85-3bd7-8176-735f-90bff14955ac/data/data3/current/BP-161215812-172.17.0.2-1731095408818 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T19:50:15,238 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/cluster_42cdef85-3bd7-8176-735f-90bff14955ac/data/data4/current/BP-161215812-172.17.0.2-1731095408818 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T19:50:15,238 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T19:50:15,241 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6419fd60{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T19:50:15,241 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@356b0e7e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T19:50:15,242 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T19:50:15,242 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@436188c4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T19:50:15,242 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46f2e60d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/hadoop.log.dir/,STOPPED} 2024-11-08T19:50:15,243 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-08T19:50:15,243 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T19:50:15,244 WARN [BP-161215812-172.17.0.2-1731095408818 heartbeating to localhost/127.0.0.1:43011 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T19:50:15,244 WARN [BP-161215812-172.17.0.2-1731095408818 heartbeating to localhost/127.0.0.1:43011 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-161215812-172.17.0.2-1731095408818 (Datanode Uuid 192c6f89-a32b-4095-aa3a-62101146b2ea) service to localhost/127.0.0.1:43011 2024-11-08T19:50:15,245 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/cluster_42cdef85-3bd7-8176-735f-90bff14955ac/data/data2/current/BP-161215812-172.17.0.2-1731095408818 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T19:50:15,245 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/cluster_42cdef85-3bd7-8176-735f-90bff14955ac/data/data1/current/BP-161215812-172.17.0.2-1731095408818 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T19:50:15,245 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T19:50:15,253 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@75925886{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-08T19:50:15,254 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6f1e7dec{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T19:50:15,254 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T19:50:15,254 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77ad49ec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T19:50:15,254 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59bbe271{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/be03f880-5bdb-efb1-dc81-e156759613c2/hadoop.log.dir/,STOPPED} 2024-11-08T19:50:15,263 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-08T19:50:15,292 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-08T19:50:15,300 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=149 (was 89) - Thread LEAK? -, OpenFileDescriptor=516 (was 443) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=298 (was 207) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2446 (was 2919)